forked from geldata/gel
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathtest_server_compiler.py
503 lines (433 loc) · 18.8 KB
/
test_server_compiler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
import contextlib
import os
import pickle
import signal
import subprocess
import sys
import tempfile
import time
import unittest.mock
import uuid
import immutables
from edb import edgeql
from edb.testbase import lang as tb
from edb.testbase import server as tbs
from edb.pgsql import params as pg_params
from edb.server import args as edbargs
from edb.server import compiler as edbcompiler
from edb.server.compiler import rpc
from edb.server import config
from edb.server.compiler_pool import amsg
from edb.server.compiler_pool import pool
from edb.server.dbview import dbview
SHORT_WAIT = 5
LONG_WAIT = 60
class TestServerCompiler(tb.BaseSchemaLoadTest):
    """Exercises the in-process, schema-aware EdgeQL script compiler."""

    SCHEMA = '''
        type Foo {
            property bar -> str;
        }
    '''

    @classmethod
    def setUpClass(cls):
        # Loading the standard schema is expensive; do it once per class.
        super().setUpClass()
        cls._std_schema = tb._load_std_schema()

    def test_server_compiler_compile_edgeql_script(self):
        # A simple shape query over the test schema must compile cleanly.
        query = '''
            SELECT Foo {
                bar
            }
        '''
        compiler = tb.new_compiler()
        ctx = edbcompiler.new_compiler_context(
            compiler_state=compiler.state,
            user_schema=self.schema,
            modaliases={None: 'default'},
        )
        edbcompiler.compile_edgeql_script(ctx=ctx, eql=query)
class ServerProtocol(amsg.ServerProtocol):
    """Test protocol that records worker (dis)connection events.

    Connection events are pushed onto asyncio queues so tests can await
    them with timeouts; ``pids`` mirrors the currently-connected workers.
    """

    def __init__(self):
        self.pids = set()
        self.connected = asyncio.Queue()
        self.disconnected = asyncio.Queue()

    def worker_connected(self, pid, version):
        # Track the worker, then wake any coroutine awaiting a connect.
        self.pids.add(pid)
        self.connected.put_nowait(pid)

    def worker_disconnected(self, pid):
        # Raises KeyError for an unknown pid — connects and disconnects
        # are expected to be strictly paired by the amsg server.
        self.pids.remove(pid)
        self.disconnected.put_nowait(pid)
class TestAmsg(tbs.TestCase):
    """Tests of the compiler-pool IPC (amsg) server and worker lifecycle.

    Each test spawns the real worker "template" process as a subprocess
    and communicates with its workers over a UNIX-domain socket,
    verifying worker restart/shutdown behavior on various signals.
    """

    @contextlib.asynccontextmanager
    async def compiler_pool(self, num_proc):
        # Start an amsg.Server on a temporary UNIX socket and launch the
        # worker template process pointed at it.  Yields
        # (server, protocol, template_proc, socket_path); tears down the
        # template process and the server on exit.
        proto = ServerProtocol()
        with tempfile.TemporaryDirectory() as td:
            sock_name = f'{td}/compiler.sock'
            server = amsg.Server(sock_name, self.loop, proto)
            await server.start()
            try:
                proc = await asyncio.create_subprocess_exec(
                    sys.executable,
                    "-m", pool.WORKER_PKG + pool.BaseLocalPool._worker_mod,
                    "--sockname", sock_name,
                    "--numproc", str(num_proc),
                    "--version-serial", "1",
                    env=pool._ENV,
                    stdin=subprocess.DEVNULL,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                )
                try:
                    yield server, proto, proc, sock_name
                finally:
                    try:
                        proc.terminate()
                        await proc.wait()
                    except ProcessLookupError:
                        # Template process already exited on its own.
                        pass
            finally:
                await server.stop()
                # Every worker must have disconnected by shutdown.
                self.assertEqual(len(proto.pids), 0)

    async def check_pid(self, pid, server):
        # Verify the worker with `pid` is connected and responsive by
        # requesting a method that does not exist: a live worker replies
        # with an error status (1) carrying a RuntimeError payload.
        conn = server.get_by_pid(pid)
        resp = await conn.request(pickle.dumps(('not_exist', ())))
        status, *data = pickle.loads(resp)
        self.assertEqual(status, 1)
        self.assertIsInstance(data[0], RuntimeError)

    async def test_server_compiler_pool_restart(self):
        pids = []
        async with self.compiler_pool(2) as (server, proto, _proc, _sn):
            # Make sure both compiler workers are up and ready
            pid1 = await asyncio.wait_for(proto.connected.get(), LONG_WAIT)
            pid2 = await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)
            await self.check_pid(pid1, server)
            await self.check_pid(pid2, server)

            # Worker killed with SIGTERM shall be restarted
            os.kill(pid1, signal.SIGTERM)
            pid = await asyncio.wait_for(proto.disconnected.get(), SHORT_WAIT)
            pids.append(pid)
            self.assertEqual(pid, pid1)
            pid3 = await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)
            self.assertNotIn(pid3, (pid1, pid2))
            await self.check_pid(pid3, server)

            # Worker killed with SIGKILL shall be restarted
            os.kill(pid2, signal.SIGKILL)
            pid = await asyncio.wait_for(proto.disconnected.get(), SHORT_WAIT)
            pids.append(pid)
            self.assertEqual(pid, pid2)
            pid4 = await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)
            self.assertNotIn(pid4, (pid1, pid2, pid3))
            await self.check_pid(pid4, server)

            # Worker killed with SIGINT shall NOT be restarted
            os.kill(pid3, signal.SIGINT)
            pid = await asyncio.wait_for(proto.disconnected.get(), SHORT_WAIT)
            pids.append(pid)
            self.assertEqual(pid, pid3)
            with self.assertRaises(asyncio.TimeoutError):
                await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)

        # The only remaining worker should be terminated on exit
        pid = await asyncio.wait_for(proto.disconnected.get(), SHORT_WAIT)
        pids.append(pid)
        # Make sure all the workers are gone
        for pid in pids:
            with self.assertRaises(OSError):
                os.kill(pid, 0)

    async def test_server_compiler_pool_template_proc_exit(self):
        async with self.compiler_pool(2) as (server, proto, proc, _sn):
            # Make sure both compiler workers are up and ready
            pid1 = await asyncio.wait_for(proto.connected.get(), LONG_WAIT)
            pid2 = await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)
            await self.check_pid(pid1, server)
            await self.check_pid(pid2, server)

            # Make sure the template process is ready to listen to signals
            # by testing its restarting feature
            pids = []
            os.kill(pid1, signal.SIGTERM)
            pid = await asyncio.wait_for(proto.disconnected.get(), SHORT_WAIT)
            pids.append(pid)
            self.assertEqual(pid, pid1)
            pid3 = await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)
            self.assertNotIn(pid3, (pid1, pid2))
            await self.check_pid(pid3, server)

            # Kill the template process, it should kill all its children
            proc.terminate()
            await proc.wait()
            for _ in range(2):
                pid = await asyncio.wait_for(
                    proto.disconnected.get(), SHORT_WAIT)
                pids.append(pid)
            self.assertIn(pid1, pids)
            self.assertIn(pid2, pids)
            self.assertIn(pid3, pids)

        # Make sure all the workers are gone
        for pid in pids:
            with self.assertRaises(OSError):
                os.kill(pid, 0)

    async def test_server_compiler_pool_server_exit(self):
        async with self.compiler_pool(2) as (server, proto, proc, _sn):
            # Make sure both compiler workers are up and ready
            pid1 = await asyncio.wait_for(proto.connected.get(), LONG_WAIT)
            pid2 = await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)
            await self.check_pid(pid1, server)
            await self.check_pid(pid2, server)

            # Stopping the server should cause the template process (and
            # with it, both workers) to shut down.
            await server.stop()
            await asyncio.wait_for(proc.wait(), SHORT_WAIT)
            pids = []
            for _ in range(2):
                pid = await asyncio.wait_for(
                    proto.disconnected.get(), SHORT_WAIT)
                pids.append(pid)
            self.assertIn(pid1, pids)
            self.assertIn(pid2, pids)

        # Make sure all the workers are gone
        for pid in pids:
            with self.assertRaises(OSError):
                os.kill(pid, 0)

    async def test_server_compiler_pool_no_socket(self):
        async with self.compiler_pool(2) as (server, proto, proc, sn):
            # Make sure both compiler workers are up and ready
            pid1 = await asyncio.wait_for(proto.connected.get(), LONG_WAIT)
            pid2 = await asyncio.wait_for(proto.connected.get(), SHORT_WAIT)
            await self.check_pid(pid1, server)
            await self.check_pid(pid2, server)

            # Destroy the UNIX domain socket file
            os.unlink(sn)

            # Kill one worker, the template process will try to restart it
            os.kill(pid1, signal.SIGTERM)

            # But the new worker won't be able to connect to the UNIX socket,
            # the template process should abort in a reasonable time with
            # enough retries, depending on the number of CPU cores.
            await asyncio.wait_for(proc.wait(), 30)
            pids = []
            while not proto.disconnected.empty():
                pids.append(proto.disconnected.get_nowait())

        # Make sure all the workers are gone
        for pid in pids:
            with self.assertRaises(OSError):
                os.kill(pid, 0)
class TestServerCompilerPool(tbs.TestCase):
    """End-to-end tests of the compiler pool under a real running server."""

    def _wait_pids(self, *pids, timeout=1):
        """Wait for the given PIDs to exit; return the ones still alive.

        Polls each process with ``os.kill(pid, 0)`` (signal 0 checks
        existence only) every 0.1s, until all have exited or *timeout*
        seconds have elapsed.
        """
        remaining = list(pids)
        start = time.monotonic()
        # Fix: loop on `remaining`, not the immutable `pids` argument —
        # looping on `pids` kept the loop busy-spinning (the sleep branch
        # is skipped once `remaining` is empty) until the full timeout
        # elapsed even after every process had already exited.
        while remaining and time.monotonic() - start < timeout:
            for pid in tuple(remaining):
                try:
                    os.kill(pid, 0)
                except OSError:
                    remaining.remove(pid)
            if remaining:
                time.sleep(0.1)
        return remaining

    def _kill_and_wait(self, *pids, sig=signal.SIGTERM, timeout=1):
        """Send *sig* to every PID and wait until all of them exit.

        Raises:
            TimeoutError: if any process is still alive after *timeout*.
        """
        for pid in pids:
            os.kill(pid, sig)
        remaining = self._wait_pids(*pids, timeout=timeout)
        if remaining:
            raise TimeoutError(
                f"Failed to kill PID {remaining} with {sig} "
                f"in {timeout} second(s)"
            )

    async def _get_worker_pids(self, sd, least_num=2, timeout=15):
        """Poll server info until at least *least_num* worker PIDs appear.

        Returns the list of compiler worker PIDs; raises TimeoutError if
        not enough workers show up within *timeout* seconds.
        """
        rv = []
        start = time.monotonic()
        while time.monotonic() - start < timeout and len(rv) < least_num:
            await asyncio.sleep(timeout / 50)
            pool_info = sd.fetch_server_info()['compiler_pool']
            rv = pool_info['worker_pids']
        if len(rv) < least_num:
            raise TimeoutError(
                f"Not enough workers found in {timeout} second(s)"
            )
        return rv

    def _get_template_pid(self, sd):
        # The template process is the parent that forks compiler workers.
        return sd.fetch_server_info()['compiler_pool']['template_pid']

    async def test_server_compiler_pool_with_server(self):
        async with tbs.start_edgedb_server(
            compiler_pool_size=2,
            compiler_pool_mode=edbargs.CompilerPoolMode.Fixed,
            http_endpoint_security=(
                edbargs.ServerEndpointSecurityMode.Optional),
        ) as sd:
            self.assertEqual(sd.call_system_api('/server/status/ready'), 'OK')
            pid1, pid2 = await self._get_worker_pids(sd)
            data = sd.fetch_metrics()
            self.assertRegex(
                data, r'\nedgedb_server_compiler_processes_current 2.0\n'
            )

            # Terminate one worker, the server is still OK
            self._kill_and_wait(pid1)
            self.assertEqual(sd.call_system_api('/server/status/ready'), 'OK')

            # Confirm that another worker is started
            pids = set(await self._get_worker_pids(sd))
            self.assertIn(pid2, pids)
            pids.remove(pid2)
            self.assertEqual(len(pids), 1)
            pid3 = pids.pop()

            # Kill both workers, the server would need some time to recover
            os.kill(pid2, signal.SIGKILL)
            os.kill(pid3, signal.SIGKILL)
            time.sleep(0.1)
            start = time.monotonic()
            while time.monotonic() - start < 10:
                try:
                    self.assertEqual(
                        sd.call_system_api('/server/status/ready'), 'OK'
                    )
                except AssertionError:
                    time.sleep(0.1)
                else:
                    break
            pids = set(await self._get_worker_pids(sd))
            self.assertNotIn(pid1, pids)
            self.assertNotIn(pid2, pids)
            self.assertNotIn(pid3, pids)

            # Kill one worker with SIGINT, it's not restarted
            self._kill_and_wait(pids.pop(), sig=signal.SIGINT, timeout=5)
            time.sleep(1)
            self.assertEqual(sd.call_system_api('/server/status/ready'), 'OK')
            self.assertSetEqual(
                set(await self._get_worker_pids(sd, least_num=1)), pids
            )
            pid4 = pids.pop()

            # Kill the template process, the server shouldn't be
            # impacted immediately
            tmpl_pid1 = self._get_template_pid(sd)
            os.kill(tmpl_pid1, signal.SIGKILL)
            self.assertEqual(sd.call_system_api('/server/status/ready'), 'OK')

            # When the new template process is started, it will spawn 2 new
            # workers, and the old pid4 will then be killed.
            self._wait_pids(pid4)
            tmpl_pid2 = self._get_template_pid(sd)
            self.assertIsNotNone(tmpl_pid2)
            self.assertNotEqual(tmpl_pid1, tmpl_pid2)
            self.assertEqual(sd.call_system_api('/server/status/ready'), 'OK')

            # Make sure everything works again
            start = time.monotonic()
            while time.monotonic() - start < 10:
                pids = await self._get_worker_pids(sd, timeout=10)
                if pid4 not in pids:
                    break
            self.assertNotIn(pid4, pids)
            self.assertEqual(len(pids), 2)
            self.assertEqual(sd.call_system_api('/server/status/ready'), 'OK')
class TestCompilerPool(tbs.TestCase):
    """Tests of compiler pool resilience and the compilation RPC requests."""

    @classmethod
    def setUpClass(cls):
        # Load the standard and reflection schemas once; the reflection
        # loader also yields the schema class layout required by the pool.
        super().setUpClass()
        cls._std_schema = tb._load_std_schema()
        result = tb._load_reflection_schema()
        cls._refl_schema, _schema_class_layout = result
        assert _schema_class_layout is not None
        cls._schema_class_layout = _schema_class_layout

    async def _test_pool_disconnect_queue(self, pool_class):
        # Verify that in-flight compile_in_tx() calls queued while all
        # workers are being restarted still complete successfully.
        with tempfile.TemporaryDirectory() as td:
            pool_ = await pool.create_compiler_pool(
                runstate_dir=td,
                pool_size=2,
                backend_runtime_params=pg_params.get_default_runtime_params(),
                std_schema=self._std_schema,
                refl_schema=self._refl_schema,
                schema_class_layout=self._schema_class_layout,
                pool_class=pool_class,
                dbindex=dbview.DatabaseIndex(
                    unittest.mock.MagicMock(),
                    std_schema=self._std_schema,
                    global_schema_pickle=pickle.dumps(None, -1),
                    sys_config={},
                    default_sysconfig=immutables.Map(),
                    sys_config_spec=config.load_spec_from_schema(
                        self._std_schema),
                ),
            )
            try:
                # Sanity-check both workers: an unknown method call must
                # fail with AttributeError but leave the workers usable.
                w1 = await pool_._acquire_worker()
                w2 = await pool_._acquire_worker()
                with self.assertRaises(AttributeError):
                    await w1.call('nonexist')
                with self.assertRaises(AttributeError):
                    await w2.call('nonexist')
                pool_._release_worker(w1)
                pool_._release_worker(w2)

                # Kill both workers and wait for the pool to report ready
                # again once replacements have spawned.
                pool_._ready_evt.clear()
                os.kill(w1.get_pid(), signal.SIGTERM)
                os.kill(w2.get_pid(), signal.SIGTERM)
                await asyncio.wait_for(pool_._ready_evt.wait(), LONG_WAIT)

                compiler = edbcompiler.new_compiler(
                    std_schema=self._std_schema,
                    reflection_schema=self._refl_schema,
                    schema_class_layout=self._schema_class_layout,
                )
                context = edbcompiler.new_compiler_context(
                    compiler_state=compiler.state,
                    user_schema=self._std_schema,
                    modaliases={None: 'default'},
                )
                orig_query = 'SELECT 123'
                cfg_ser = compiler.state.compilation_config_serializer
                request = rpc.CompilationRequest(
                    source=edgeql.Source.from_string(orig_query),
                    protocol_version=(1, 0),
                    schema_version=uuid.uuid4(),
                    compilation_config_serializer=cfg_ser,
                    implicit_limit=101,
                )
                # Fire 4 concurrent compilations into a 2-worker pool so
                # some of them must queue behind the restarted workers.
                await asyncio.gather(*(pool_.compile_in_tx(
                    None,
                    pickle.dumps(context.state.root_user_schema),
                    context.state.current_tx().id,
                    pickle.dumps(context.state),
                    0,
                    request.serialize(),
                    orig_query,
                ) for _ in range(4)))
            finally:
                await pool_.stop()

    async def test_server_compiler_pool_disconnect_queue_fixed(self):
        await self._test_pool_disconnect_queue(pool.FixedPool)

    async def test_server_compiler_pool_disconnect_queue_adaptive(self):
        await self._test_pool_disconnect_queue(pool.SimpleAdaptivePool)

    def test_server_compiler_rpc_hash_eq(self):
        # CompilationRequest equality must survive a serialize/deserialize
        # round-trip, while the hash also incorporates the schema version.
        compiler = edbcompiler.new_compiler(
            std_schema=self._std_schema,
            reflection_schema=self._refl_schema,
            schema_class_layout=self._schema_class_layout,
        )

        def test(source: edgeql.Source):
            cfg_ser = compiler.state.compilation_config_serializer
            request1 = rpc.CompilationRequest(
                source=source,
                protocol_version=(1, 0),
                schema_version=uuid.uuid4(),
                compilation_config_serializer=cfg_ser,
            )
            request2 = rpc.CompilationRequest.deserialize(
                request1.serialize(), "<unknown>", cfg_ser)
            self.assertEqual(hash(request1), hash(request2))
            self.assertEqual(request1, request2)

            # schema_version affects the cache_key, hence the hash.
            # But, it's not serialized so the 2 requests are still equal.
            # This makes request2 a new key as being used in dicts.
            request2.set_schema_version(uuid.uuid4())
            self.assertNotEqual(hash(request1), hash(request2))
            self.assertEqual(request1, request2)

        test(edgeql.Source.from_string("SELECT 42"))
        test(edgeql.NormalizedSource.from_string("SELECT 42"))