@@ -2,7 +2,7 @@
 from crate.client import connect
 from crate.client.exceptions import ProgrammingError
 
-from crate.qa.tests import NodeProvider, insert_data, wait_for_active_shards, UpgradePath
+from crate.qa.tests import NodeProvider, insert_data, wait_for_active_shards, UpgradePath, assert_busy
 
 ROLLING_UPGRADES_V4 = (
     # 4.0.0 -> 4.0.1 -> 4.0.2 don't support rolling upgrades due to a bug
@@ -40,13 +40,21 @@
     UpgradePath('5.7.x', '5.8.x'),
     UpgradePath('5.8.x', '5.9.x'),
     UpgradePath('5.9.x', '5.10.x'),
-    UpgradePath('5.10.x', '5.10'),
+    #UpgradePath('5.10.x', '5.10'),
     UpgradePath('5.10', 'latest-nightly'),
 )
 
 
 class RollingUpgradeTest(NodeProvider, unittest.TestCase):
 
+    def _num_docs(self, cursor, schema, table):
+        # Total docs in a table: sum num_docs over all of its shards in sys.shards.
+        cursor.execute("select sum(num_docs) from sys.shards where schema_name = ? and table_name = ?", (schema, table))
+        return cursor.fetchall()[0][0]
+
+    def _assert_num_docs(self, cursor, schema, table, expected_count):
+        # Fails until the count matches; intended to be polled via assert_busy.
+        count = self._num_docs(cursor, schema, table)
+        self.assertEqual(expected_count, count)
+
     def test_rolling_upgrade_4_to_5(self):
         print("")  # force newline for first print
         for path in ROLLING_UPGRADES_V4:
@@ -88,6 +96,10 @@ def _test_rolling_upgrade(self, path, nodes):
         }
         cluster = self._new_cluster(path.from_version, nodes, settings=settings)
         cluster.start()
+        replica_cluster = None
+        # Compare (major, minor) as a tuple so the >= 5.10 check also holds for majors > 5
+        if tuple(int(v) for v in path.from_version.split('.')[:2]) >= (5, 10):
+            replica_cluster = self._new_cluster(path.from_version, 1, settings=settings, explicit_discovery=False)
+            replica_cluster.start()
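+            # Presumably explicit_discovery=False keeps this one-node cluster from joining
+            # the main cluster; it acts as the remote end for logical replication below.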
         with connect(cluster.node().http_url, error_trace=True) as conn:
             c = conn.cursor()
             c.execute("create user arthur with (password = 'secret')")
@@ -152,6 +164,24 @@ def _test_rolling_upgrade(self, path, nodes):
             # Add the shards of the new partition primaries
             expected_active_shards += shards
 
+            # Set up tables for logical replication
+            if tuple(int(v) for v in path.from_version.split('.')[:2]) >= (5, 10):
+                c.execute("create table doc.x (a int) clustered into 1 shards with (number_of_replicas=0)")
+                expected_active_shards += 1
+                c.execute("create publication p for table doc.x")
+                with connect(replica_cluster.node().http_url, error_trace=True) as replica_conn:
+                    rc = replica_conn.cursor()
+                    transport_port = cluster.node().addresses.transport.port
+                    replica_transport_port = replica_cluster.node().addresses.transport.port
+                    # Both ports feed the subscription connection strings below; the range
+                    # check guards against picking up a node from another test fixture.
+                    assert 4300 <= transport_port <= 4310 and 4300 <= replica_transport_port <= 4310
+                    rc.execute("create table doc.rx (a int) clustered into 1 shards with (number_of_replicas=0)")
+                    rc.execute("create publication rp for table doc.rx")
+                    rc.execute(f"create subscription rs connection 'crate://localhost:{transport_port}?user=crate&sslmode=sniff' publication p")
+                    assert_busy(lambda: self._assert_num_docs(rc, "doc", "x", 0))
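+                    # doc.x is still empty, so a count of 0 on the replica merely proves
+                    # the subscription became active and the table arrived.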
+                    c.execute(f"create subscription s connection 'crate://localhost:{replica_transport_port}?user=crate&sslmode=sniff' publication rp")
+                    assert_busy(lambda: self._assert_num_docs(c, "doc", "rx", 0))
+                    expected_active_shards += 1
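+                    # Replication now runs in both directions: doc.x flows to the replica
+                    # cluster, doc.rx flows back to the main cluster via subscription s.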
+
         for idx, node in enumerate(cluster):
             # Enforce that an old-version node is the handler, to make sure an upgraded node can serve 'select *' coming from an old-version node.
             # Otherwise the upgraded node simply requests N-1 columns from the old-version node with N columns, and that always works.
@@ -282,6 +312,27 @@ def _test_rolling_upgrade(self, path, nodes):
                 c.execute("select version['created'] from information_schema.table_partitions where table_name = 't3' and values['a'] = ?", [idx])
                 self.assertEqual(c.fetchall(), [[partition_version]])
 
+                # Ensure logical replication works
+                if tuple(int(v) for v in path.from_version.split('.')[:2]) >= (5, 10):
+                    with connect(replica_cluster.node().http_url, error_trace=True) as replica_conn:
+                        rc = replica_conn.cursor()
+
+                        # Subscribed tables cannot be dropped while the subscription is active
+                        with self.assertRaises(ProgrammingError):
+                            rc.execute("drop table doc.x")
+                        with self.assertRaises(ProgrammingError):
+                            c.execute("drop table doc.rx")
+
+                        count = self._num_docs(rc, "doc", "x")
+                        count2 = self._num_docs(c, "doc", "rx")
+
+                        c.execute("insert into doc.x values (1)")
+                        rc.execute("insert into doc.rx values (1)")
+
+                        # Plain reads on the subscribed tables must also keep working
+                        rc.execute("select count(*) from doc.x")
+                        c.execute("select count(*) from doc.rx")
+                        assert_busy(lambda: self._assert_num_docs(rc, "doc", "x", count + 1))
+                        assert_busy(lambda: self._assert_num_docs(c, "doc", "rx", count2 + 1))
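+                        # Both sides converging on the new counts shows replication keeps
+                        # working while the cluster runs mixed versions.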
+
         # Finally validate that all shards (primaries and replicas) of all partitions are started
         # and writes into the partitioned table while upgrading were successful
         with connect(cluster.node().http_url, error_trace=True) as conn: