@@ -3,32 +3,16 @@ import sys
 import os
 import logging
 import argparse
-import time
 import json
-import subprocess
 import unicodedata
 
 sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'api'))
 import zulip
 
-from typing import Set, Optional
-
-def fetch_public_streams() -> Optional[Set[str]]:
+def write_public_streams() -> None:
     public_streams = set()
 
-    try:
-        res = zulip_client.get_streams(include_all_active=True)
-        if res.get("result") == "success":
-            streams = res["streams"]
-        else:
-            logging.error("Error getting public streams:\n%s" % (res,))
-            return None
-    except Exception:
-        logging.exception("Error getting public streams:")
-        return None
-
-    for stream in streams:
-        stream_name = stream["name"]
+    for stream_name in stream_names:
         # Zephyr class names are canonicalized by first applying NFKC
         # normalization and then lower-casing server-side
         canonical_cls = unicodedata.normalize("NFKC", stream_name).lower()
@@ -41,7 +25,9 @@ def fetch_public_streams() -> Optional[Set[str]]:
 
         public_streams.add(canonical_cls)
 
-    return public_streams
+    with open("/home/zulip/public_streams.tmp", "w") as f:
+        f.write(json.dumps(list(public_streams)) + "\n")
+    os.rename("/home/zulip/public_streams.tmp", "/home/zulip/public_streams")
 
 if __name__ == "__main__":
     log_file = "/home/zulip/sync_public_streams.log"
@@ -58,22 +44,50 @@ if __name__ == "__main__":
     options = parser.parse_args()
 
     zulip_client = zulip.Client(client="ZulipSyncPublicStreamsBot/0.1")
+    backoff = zulip.RandomExponentialBackoff()
 
-    while True:
-        # Sync every 5 minutes because this makes a very large network
-        # request, due to Zephyr users who subscribe to 10K+ class
-        # names generated by a script.
-        #
-        # This delay means we won't subscribe to new Zephyr classes
-        # until 5 minutes after they are created in Zulip; this isn't
-        # great but an acceptable tradeoff.
-        time.sleep(300)
-        public_streams = fetch_public_streams()
-        if public_streams is None:
+    while backoff.keep_going():
+        try:
+            res = zulip_client.register(event_types=["stream"])
+            if res["result"] != "success":
+                backoff.fail()
+                logger.error("Error registering event queue:\n%r", res)
+                continue
+        except Exception:
+            logger.exception("Error registering event queue:")
             continue
 
-        f = open("/home/zulip/public_streams.tmp", "w")
-        f.write(json.dumps(list(public_streams)) + "\n")
-        f.close()
+        backoff.succeed()
+        queue_id = res["queue_id"]
+        last_event_id = res["last_event_id"]
+        stream_names = {stream["name"] for stream in res["streams"]}
+        write_public_streams()
+
+        while backoff.keep_going():
+            try:
+                res = zulip_client.get_events(queue_id=queue_id, last_event_id=last_event_id)
+                if res["result"] != "success":
+                    backoff.fail()
+                    logger.error("Error getting events:\n%r", res)
+                    if res["result"] == "error":
+                        # Break out to the outer loop to re-register the event queue.
+                        break
+                    continue
+            except Exception:
+                logger.exception("Error getting events:")
+                continue
 
-        subprocess.call(["mv", "/home/zulip/public_streams.tmp", "/home/zulip/public_streams"])
+            backoff.succeed()
+            for event in res["events"]:
+                last_event_id = max(last_event_id, event["id"])
+                if event["type"] == "stream":
+                    if event["op"] == "create":
+                        stream_names.update(
+                            stream["name"] for stream in event["streams"]
+                        )
+                        write_public_streams()
+                    elif event["op"] == "delete":
+                        stream_names.difference_update(
+                            stream["name"] for stream in event["streams"]
+                        )
+                        write_public_streams()