From 06ab64f201dffcb93b826546e20be53cc712c8b8 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 10 Jan 2023 16:31:28 +0000 Subject: [PATCH 001/344] Implement MSC3925: changes to bundling of edits (#14811) Two parts to this: * Bundle the whole of the replacement with any edited events. This is backwards-compatible so I haven't put it behind a flag. * Optionally, inhibit server-side replacement of edited events. This has scope to break things, so it is currently disabled by default. --- changelog.d/14811.feature | 1 + synapse/config/experimental.py | 3 + synapse/events/utils.py | 31 +++-- synapse/server.py | 2 +- tests/rest/client/test_relations.py | 185 +++++++++++++++++++--------- 5 files changed, 159 insertions(+), 63 deletions(-) create mode 100644 changelog.d/14811.feature diff --git a/changelog.d/14811.feature b/changelog.d/14811.feature new file mode 100644 index 000000000000..87542835c3a2 --- /dev/null +++ b/changelog.d/14811.feature @@ -0,0 +1 @@ +Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 0f3870bfe18e..a8b2db372d56 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -139,3 +139,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) + + # MSC3925: do not replace events with their edits + self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 13fa93afb87b..ae57a4df5ee8 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -403,6 +403,14 @@ class EventClientSerializer: clients. """ + def __init__(self, inhibit_replacement_via_edits: bool = False): + """ + Args: + inhibit_replacement_via_edits: If this is set to True, then events are + never replaced by their edits. + """ + self._inhibit_replacement_via_edits = inhibit_replacement_via_edits + def serialize_event( self, event: Union[JsonDict, EventBase], @@ -422,6 +430,8 @@ def serialize_event( into the event. apply_edits: Whether the content of the event should be modified to reflect any replacement in `bundle_aggregations[].replace`. + See also the `inhibit_replacement_via_edits` constructor arg: if that is + set to True, then this argument is ignored. Returns: The serialized event """ @@ -495,7 +505,8 @@ def _inject_bundled_aggregations( again for additional events in a recursive manner. serialized_event: The serialized event which may be modified. apply_edits: Whether the content of the event should be modified to reflect - any replacement in `aggregations.replace`. + any replacement in `aggregations.replace` (subject to the + `inhibit_replacement_via_edits` constructor arg). """ # We have already checked that aggregations exist for this event. @@ -518,15 +529,21 @@ def _inject_bundled_aggregations( if event_aggregations.replace: # If there is an edit, optionally apply it to the event. edit = event_aggregations.replace - if apply_edits: + if apply_edits and not self._inhibit_replacement_via_edits: self._apply_edit(event, serialized_event, edit) # Include information about it in the relations dict. 
- serialized_aggregations[RelationTypes.REPLACE] = { - "event_id": edit.event_id, - "origin_server_ts": edit.origin_server_ts, - "sender": edit.sender, - } + # + # Matrix spec v1.5 (https://spec.matrix.org/v1.5/client-server-api/#server-side-aggregation-of-mreplace-relationships) + # said that we should only include the `event_id`, `origin_server_ts` and + # `sender` of the edit; however MSC3925 proposes extending it to the whole + # of the edit, which is what we do here. + serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event( + edit, + time_now, + config=config, + apply_edits=False, + ) # Include any threaded replies to this event. if event_aggregations.thread: diff --git a/synapse/server.py b/synapse/server.py index 5baae2325e43..f4ab94c4f33c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -743,7 +743,7 @@ def get_oidc_handler(self) -> "OidcHandler": @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer() + return EventClientSerializer(self.config.experimental.msc3925_inhibit_edit) @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index b86f341ff5b4..c8a6911d5ee5 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -30,6 +30,7 @@ from tests.server import FakeChannel from tests.test_utils import make_awaitable from tests.test_utils.event_injection import inject_event +from tests.unittest import override_config class BaseRelationsTestCase(unittest.HomeserverTestCase): @@ -355,30 +356,67 @@ def test_ignore_invalid_room(self) -> None: self.assertEqual(200, channel.code, channel.json_body) self.assertNotIn("m.relations", channel.json_body["unsigned"]) + def _assert_edit_bundle( + self, event_json: JsonDict, edit_event_id: str, edit_event_content: JsonDict + ) -> None: + """ + Assert that the given event has a correctly-serialised edit event in its + bundled aggregations + + Args: + event_json: the serialised event to be checked + edit_event_id: the ID of the edit event that we expect to be bundled + edit_event_content: the content of that event, excluding the 'm.relates_to` + property + """ + relations_dict = event_json["unsigned"].get("m.relations") + self.assertIn(RelationTypes.REPLACE, relations_dict) + + m_replace_dict = relations_dict[RelationTypes.REPLACE] + for key in [ + "event_id", + "sender", + "origin_server_ts", + "content", + "type", + "unsigned", + ]: + self.assertIn(key, m_replace_dict) + + expected_edit_content = { + "m.relates_to": { + "event_id": event_json["event_id"], + "rel_type": "m.replace", + } + } + expected_edit_content.update(edit_event_content) + + self.assert_dict( + { + "event_id": edit_event_id, + "sender": self.user_id, + "content": expected_edit_content, + "type": "m.room.message", + }, + m_replace_dict, + ) + def test_edit(self) -> None: """Test that a simple edit works.""" new_body = {"msgtype": "m.text", "body": "I've been edited!"} + edit_event_content = { + "msgtype": "m.text", + "body": "foo", + "m.new_content": new_body, + } channel = self._send_relation( RelationTypes.REPLACE, "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + content=edit_event_content, ) edit_event_id = channel.json_body["event_id"] - def assert_bundle(event_json: JsonDict) -> None: - """Assert the expected values of the bundled aggregations.""" - relations_dict = event_json["unsigned"].get("m.relations") 
- self.assertIn(RelationTypes.REPLACE, relations_dict) - - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) - - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict - ) - # /event should return the *original* event channel = self.make_request( "GET", @@ -389,7 +427,7 @@ def assert_bundle(event_json: JsonDict) -> None: self.assertEqual( channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"} ) - assert_bundle(channel.json_body) + self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content) # Request the room messages. channel = self.make_request( @@ -398,7 +436,11 @@ def assert_bundle(event_json: JsonDict) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(self._find_event_in_chunk(channel.json_body["chunk"])) + self._assert_edit_bundle( + self._find_event_in_chunk(channel.json_body["chunk"]), + edit_event_id, + edit_event_content, + ) # Request the room context. # /context should return the edited event. @@ -408,7 +450,9 @@ def assert_bundle(event_json: JsonDict) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - assert_bundle(channel.json_body["event"]) + self._assert_edit_bundle( + channel.json_body["event"], edit_event_id, edit_event_content + ) self.assertEqual(channel.json_body["event"]["content"], new_body) # Request sync, but limit the timeline so it becomes limited (and includes @@ -420,7 +464,11 @@ def assert_bundle(event_json: JsonDict) -> None: self.assertEqual(200, channel.code, channel.json_body) room_timeline = channel.json_body["rooms"]["join"][self.room]["timeline"] self.assertTrue(room_timeline["limited"]) - assert_bundle(self._find_event_in_chunk(room_timeline["events"])) + self._assert_edit_bundle( + self._find_event_in_chunk(room_timeline["events"]), + edit_event_id, + edit_event_content, + ) # Request search. channel = self.make_request( @@ -437,7 +485,45 @@ def assert_bundle(event_json: JsonDict) -> None: "results" ] ] - assert_bundle(self._find_event_in_chunk(chunk)) + self._assert_edit_bundle( + self._find_event_in_chunk(chunk), + edit_event_id, + edit_event_content, + ) + + @override_config({"experimental_features": {"msc3925_inhibit_edit": True}}) + def test_edit_inhibit_replace(self) -> None: + """ + If msc3925_inhibit_edit is enabled, then the original event should not be + replaced. + """ + + new_body = {"msgtype": "m.text", "body": "I've been edited!"} + edit_event_content = { + "msgtype": "m.text", + "body": "foo", + "m.new_content": new_body, + } + channel = self._send_relation( + RelationTypes.REPLACE, + "m.room.message", + content=edit_event_content, + ) + edit_event_id = channel.json_body["event_id"] + + # /context should return the *original* event. 
+ channel = self.make_request( + "GET", + f"/rooms/{self.room}/context/{self.parent_id}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + self.assertEqual( + channel.json_body["event"]["content"], {"body": "Hi!", "msgtype": "m.text"} + ) + self._assert_edit_bundle( + channel.json_body["event"], edit_event_id, edit_event_content + ) def test_multi_edit(self) -> None: """Test that multiple edits, including attempts by people who @@ -455,10 +541,15 @@ def test_multi_edit(self) -> None: ) new_body = {"msgtype": "m.text", "body": "I've been edited!"} + edit_event_content = { + "msgtype": "m.text", + "body": "foo", + "m.new_content": new_body, + } channel = self._send_relation( RelationTypes.REPLACE, "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + content=edit_event_content, ) edit_event_id = channel.json_body["event_id"] @@ -480,16 +571,8 @@ def test_multi_edit(self) -> None: self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(channel.json_body["event"]["content"], new_body) - - relations_dict = channel.json_body["event"]["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) - - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) - - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + self._assert_edit_bundle( + channel.json_body["event"], edit_event_id, edit_event_content ) def test_edit_reply(self) -> None: @@ -502,11 +585,15 @@ def test_edit_reply(self) -> None: ) reply = channel.json_body["event_id"] - new_body = {"msgtype": "m.text", "body": "I've been edited!"} + edit_event_content = { + "msgtype": "m.text", + "body": "foo", + "m.new_content": {"msgtype": "m.text", "body": "I've been edited!"}, + } channel = self._send_relation( RelationTypes.REPLACE, "m.room.message", - content={"msgtype": "m.text", "body": "foo", "m.new_content": new_body}, + content=edit_event_content, parent_id=reply, ) edit_event_id = channel.json_body["event_id"] @@ -549,28 +636,22 @@ def test_edit_reply(self) -> None: # We expect that the edit relation appears in the unsigned relations # section. - relations_dict = result_event_dict["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict, desc) - - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict, desc) - - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + self._assert_edit_bundle( + result_event_dict, edit_event_id, edit_event_content ) def test_edit_edit(self) -> None: """Test that an edit cannot be edited.""" new_body = {"msgtype": "m.text", "body": "Initial edit"} + edit_event_content = { + "msgtype": "m.text", + "body": "Wibble", + "m.new_content": new_body, + } channel = self._send_relation( RelationTypes.REPLACE, "m.room.message", - content={ - "msgtype": "m.text", - "body": "Wibble", - "m.new_content": new_body, - }, + content=edit_event_content, ) edit_event_id = channel.json_body["event_id"] @@ -599,8 +680,7 @@ def test_edit_edit(self) -> None: ) # The relations information should not include the edit to the edit. 
- relations_dict = channel.json_body["unsigned"].get("m.relations") - self.assertIn(RelationTypes.REPLACE, relations_dict) + self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content) # /context should return the event updated for the *first* edit # (The edit to the edit should be ignored.) @@ -611,13 +691,8 @@ def test_edit_edit(self) -> None: ) self.assertEqual(200, channel.code, channel.json_body) self.assertEqual(channel.json_body["event"]["content"], new_body) - - m_replace_dict = relations_dict[RelationTypes.REPLACE] - for key in ["event_id", "sender", "origin_server_ts"]: - self.assertIn(key, m_replace_dict) - - self.assert_dict( - {"event_id": edit_event_id, "sender": self.user_id}, m_replace_dict + self._assert_edit_bundle( + channel.json_body["event"], edit_event_id, edit_event_content ) # Directly requesting the edit should not have the edit to the edit applied. From bc7ca704ddc34a76fe9e3e1f895f459ac9ac7186 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 11 Jan 2023 11:47:44 +0100 Subject: [PATCH 002/344] Add `tag` to `listeners` documentation (#14803) * Add `tag` to `listeners` documentation * newsfile --- changelog.d/14803.doc | 1 + docs/usage/administration/request_log.md | 4 ++-- docs/usage/configuration/config_documentation.md | 4 ++++ 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14803.doc diff --git a/changelog.d/14803.doc b/changelog.d/14803.doc new file mode 100644 index 000000000000..30d8ec8dbc01 --- /dev/null +++ b/changelog.d/14803.doc @@ -0,0 +1 @@ +Add missing documentation for `tag` to `listeners` section. \ No newline at end of file diff --git a/docs/usage/administration/request_log.md b/docs/usage/administration/request_log.md index 7dd9969d8668..292e3449f195 100644 --- a/docs/usage/administration/request_log.md +++ b/docs/usage/administration/request_log.md @@ -10,10 +10,10 @@ See the following for how to decode the dense data available from the default lo ``` -| Part | Explanation | +| Part | Explanation | | ----- | ------------ | | AAAA | Timestamp request was logged (not received) | -| BBBB | Logger name (`synapse.access.(http\|https).`, where 'tag' is defined in the `listeners` config section, normally the port) | +| BBBB | Logger name (`synapse.access.(http\|https).`, where 'tag' is defined in the [`listeners`](../configuration/config_documentation.md#listeners) config section, normally the port) | | CCCC | Line number in code | | DDDD | Log Level | | EEEE | Request Identifier (This identifier is shared by related log lines)| diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 93d6c7fb02dd..a355eef5297e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -422,6 +422,10 @@ Sub-options for each listener include: * `port`: the TCP port to bind to. +* `tag`: An alias for the port in the logger name. If set the tag is logged instead +of the port. Default to `None`, is optional and only valid for listener with `type: http`. +See the docs [request log format](../administration/request_log.md). + * `bind_addresses`: a list of local addresses to listen on. The default is 'all local interfaces'. 
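The `tag` option documented in the patch above only changes the suffix of the access logger name (`synapse.access.(http|https).<tag>`, falling back to the port), so standard Python logging configuration can target a single listener's request log. A minimal sketch of that idea, assuming a listener configured with `tag: client` and an arbitrary log path, neither of which appears in the patch:

```python
# Hedged illustration only: route the access log of one Synapse listener to its
# own file by matching the logger name that the `tag` option controls.
# "client" and the file path are assumed example values, not taken from the patch.
import logging

access_logger = logging.getLogger("synapse.access.http.client")

handler = logging.FileHandler("/var/log/matrix-synapse/client-access.log")
handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(message)s"))

access_logger.addHandler(handler)
access_logger.setLevel(logging.INFO)
```

In practice this routing would normally be expressed in Synapse's YAML log config rather than in code; the snippet is only meant to show which logger name the new `tag` documentation refers to.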
From 7b3a8f2b0cb19ec31f922829edf41a13e364d0e2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 11 Jan 2023 11:44:13 +0000 Subject: [PATCH 003/344] Add poetry.toml to .gitignore (#14807) --- .gitignore | 3 +++ changelog.d/14807.misc | 1 + 2 files changed, 4 insertions(+) create mode 100644 changelog.d/14807.misc diff --git a/.gitignore b/.gitignore index 2b09bddf18f6..6937de88bcd1 100644 --- a/.gitignore +++ b/.gitignore @@ -69,3 +69,6 @@ book/ # Poetry will create a setup.py, which we don't want to include. /setup.py + +# Don't include users' poetry configs +/poetry.toml diff --git a/changelog.d/14807.misc b/changelog.d/14807.misc new file mode 100644 index 000000000000..eef9cd3a44de --- /dev/null +++ b/changelog.d/14807.misc @@ -0,0 +1 @@ +Add local poetry config files (`poetry.toml`) to `.gitignore`. \ No newline at end of file From 73f097888eedaad05eda6b2453b6558158c0b032 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 11 Jan 2023 13:00:38 +0100 Subject: [PATCH 004/344] Add listener `health` (#14747) Fixes: #8780 --- changelog.d/14747.feature | 1 + docs/usage/configuration/config_documentation.md | 6 ++++++ synapse/app/generic_worker.py | 3 +++ synapse/app/homeserver.py | 3 +++ 4 files changed, 13 insertions(+) create mode 100644 changelog.d/14747.feature diff --git a/changelog.d/14747.feature b/changelog.d/14747.feature new file mode 100644 index 000000000000..0b8066159cba --- /dev/null +++ b/changelog.d/14747.feature @@ -0,0 +1 @@ +Add a dedicated listener configuration for `health` endpoint. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index a355eef5297e..294dd6edddcd 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -480,6 +480,12 @@ Valid resource names are: * `static`: static resources under synapse/static (/_matrix/static). (Mostly useful for 'fallback authentication'.) +* `health`: the [health check endpoint](../../reverse_proxy.md#health-check-endpoint). This endpoint + is by default active for all other resources and does not have to be activated separately. + This is only useful if you want to use the health endpoint explicitly on a dedicated port or + for [workers](../../workers.md) and containers without listener e.g. + [application services](../../workers.md#notifying-application-services). + Example configuration #1: ```yaml listeners: diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index bcc8abe20c1f..8108b1e98f7f 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -199,6 +199,9 @@ def _listen_http(self, listener_config: ListenerConfig) -> None: "A 'media' listener is configured but the media" " repository is disabled. Ignoring." 
) + elif name == "health": + # Skip loading, health resource is always included + continue if name == "openid" and "federation" not in res.names: # Only load the openid resource separately if federation resource diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b9be558c7ea0..6176a70eb2a0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -96,6 +96,9 @@ def _listener_http( # Skip loading openid resource if federation is defined # since federation resource will include openid continue + if name == "health": + # Skip loading, health resource is always included + continue resources.update(self._configure_named_resource(name, res.compress)) additional_resources = listener_config.http_options.additional_resources From d6bda5adddd863409961dbafcd018356c213610e Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 11 Jan 2023 12:29:13 +0000 Subject: [PATCH 005/344] Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. (#14799) --- changelog.d/14799.bugfix | 1 + .../storage/databases/main/events_bg_updates.py | 12 ++++++++++++ .../delta/73/24_events_jump_to_date_index.sql | 17 +++++++++++++++++ 3 files changed, 30 insertions(+) create mode 100644 changelog.d/14799.bugfix create mode 100644 synapse/storage/schema/main/delta/73/24_events_jump_to_date_index.sql diff --git a/changelog.d/14799.bugfix b/changelog.d/14799.bugfix new file mode 100644 index 000000000000..dc867bd93a7c --- /dev/null +++ b/changelog.d/14799.bugfix @@ -0,0 +1 @@ +Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. \ No newline at end of file diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 9e31798ab198..b9d3c36d602c 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -69,6 +69,8 @@ class _BackgroundUpdates: EVENTS_POPULATE_STATE_KEY_REJECTIONS = "events_populate_state_key_rejections" + EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" + @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: @@ -260,6 +262,16 @@ def __init__( self._background_events_populate_state_key_rejections, ) + # Add an index that would be useful for jumping to date using + # get_event_id_for_timestamp. + self.db_pool.updates.register_background_index_update( + _BackgroundUpdates.EVENTS_JUMP_TO_DATE_INDEX, + index_name="events_jump_to_date_idx", + table="events", + columns=["room_id", "origin_server_ts"], + where_clause="NOT outlier", + ) + async def _background_reindex_fields_sender( self, progress: JsonDict, batch_size: int ) -> int: diff --git a/synapse/storage/schema/main/delta/73/24_events_jump_to_date_index.sql b/synapse/storage/schema/main/delta/73/24_events_jump_to_date_index.sql new file mode 100644 index 000000000000..67059909a1d2 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/24_events_jump_to_date_index.sql @@ -0,0 +1,17 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7324, 'events_jump_to_date_index', '{}'); From 7f2cabf271e6027da4001571b9e6584ade3c2c8c Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 11 Jan 2023 07:35:40 -0500 Subject: [PATCH 006/344] Fix-up type hints for tests.push module. (#14816) --- changelog.d/14816.misc | 1 + mypy.ini | 5 +-- stubs/synapse/synapse_rust/push.pyi | 5 ++- tests/push/test_email.py | 46 ++++++++++++++------------ tests/push/test_http.py | 2 +- tests/push/test_presentable_names.py | 44 ++++++++++++------------ tests/push/test_push_rule_evaluator.py | 26 +++++++-------- 7 files changed, 67 insertions(+), 62 deletions(-) create mode 100644 changelog.d/14816.misc diff --git a/changelog.d/14816.misc b/changelog.d/14816.misc new file mode 100644 index 000000000000..d44571b73149 --- /dev/null +++ b/changelog.d/14816.misc @@ -0,0 +1 @@ +Add missing type hints. diff --git a/mypy.ini b/mypy.ini index 013fbbdfc02a..468bfe588ccc 100644 --- a/mypy.ini +++ b/mypy.ini @@ -48,9 +48,6 @@ exclude = (?x) |tests/logging/__init__.py |tests/logging/test_terse_json.py |tests/module_api/test_api.py - |tests/push/test_email.py - |tests/push/test_presentable_names.py - |tests/push/test_push_rule_evaluator.py |tests/rest/client/test_transactions.py |tests/rest/media/v1/test_media_storage.py |tests/server.py @@ -101,7 +98,7 @@ disallow_untyped_defs = True [mypy-tests.metrics.*] disallow_untyped_defs = True -[mypy-tests.push.test_bulk_push_rule_evaluator] +[mypy-tests.push.*] disallow_untyped_defs = True [mypy-tests.rest.*] diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index dab5d4aff7ce..67b2fb99abbd 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -1,4 +1,4 @@ -from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union +from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union from synapse.types import JsonDict @@ -54,3 +54,6 @@ class PushRuleEvaluator: user_id: Optional[str], display_name: Optional[str], ) -> Collection[Union[Mapping, str]]: ... + def matches( + self, condition: JsonDict, user_id: Optional[str], display_name: Optional[str] + ) -> bool: ... diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 57b2f0536e4a..ab8bb417e759 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -13,25 +13,28 @@ # limitations under the License. 
import email.message import os -from typing import Dict, List, Sequence, Tuple +from typing import Any, Dict, List, Sequence, Tuple import attr import pkg_resources from twisted.internet.defer import Deferred +from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin from synapse.api.errors import Codes, SynapseError from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase -@attr.s +@attr.s(auto_attribs=True) class _User: "Helper wrapper for user ID and access token" - id = attr.ib() - token = attr.ib() + id: str + token: str class EmailPusherTests(HomeserverTestCase): @@ -41,10 +44,9 @@ class EmailPusherTests(HomeserverTestCase): room.register_servlets, login.register_servlets, ] - user_id = True hijack_auth = False - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() config["email"] = { @@ -72,17 +74,17 @@ def make_homeserver(self, reactor, clock): # List[Tuple[Deferred, args, kwargs]] self.email_attempts: List[Tuple[Deferred, Sequence, Dict]] = [] - def sendmail(*args, **kwargs): + def sendmail(*args: Any, **kwargs: Any) -> Deferred: # This mocks out synapse.reactor.send_email._sendmail. - d = Deferred() + d: Deferred = Deferred() self.email_attempts.append((d, args, kwargs)) return d - hs.get_send_email_handler()._sendmail = sendmail + hs.get_send_email_handler()._sendmail = sendmail # type: ignore[assignment] return hs - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Register the user who gets notified self.user_id = self.register_user("user", "pass") self.access_token = self.login("user", "pass") @@ -129,7 +131,7 @@ def prepare(self, reactor, clock, hs): self.auth_handler = hs.get_auth_handler() self.store = hs.get_datastores().main - def test_need_validated_email(self): + def test_need_validated_email(self) -> None: """Test that we can only add an email pusher if the user has validated their email. """ @@ -151,7 +153,7 @@ def test_need_validated_email(self): self.assertEqual(400, cm.exception.code) self.assertEqual(Codes.THREEPID_NOT_FOUND, cm.exception.errcode) - def test_simple_sends_email(self): + def test_simple_sends_email(self) -> None: # Create a simple room with two users room = self.helper.create_room_as(self.user_id, tok=self.access_token) self.helper.invite( @@ -171,7 +173,7 @@ def test_simple_sends_email(self): self._check_for_mail() - def test_invite_sends_email(self): + def test_invite_sends_email(self) -> None: # Create a room and invite the user to it room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) self.helper.invite( @@ -184,7 +186,7 @@ def test_invite_sends_email(self): # We should get emailed about the invite self._check_for_mail() - def test_invite_to_empty_room_sends_email(self): + def test_invite_to_empty_room_sends_email(self) -> None: # Create a room and invite the user to it room = self.helper.create_room_as(self.others[0].id, tok=self.others[0].token) self.helper.invite( @@ -200,7 +202,7 @@ def test_invite_to_empty_room_sends_email(self): # We should get emailed about the invite self._check_for_mail() - def test_multiple_members_email(self): + def test_multiple_members_email(self) -> None: # We want to test multiple notifications, so we pause processing of push # while we send messages. 
self.pusher._pause_processing() @@ -227,7 +229,7 @@ def test_multiple_members_email(self): # We should get emailed about those messages self._check_for_mail() - def test_multiple_rooms(self): + def test_multiple_rooms(self) -> None: # We want to test multiple notifications from multiple rooms, so we pause # processing of push while we send messages. self.pusher._pause_processing() @@ -257,7 +259,7 @@ def test_multiple_rooms(self): # We should get emailed about those messages self._check_for_mail() - def test_room_notifications_include_avatar(self): + def test_room_notifications_include_avatar(self) -> None: # Create a room and set its avatar. room = self.helper.create_room_as(self.user_id, tok=self.access_token) self.helper.send_state( @@ -290,7 +292,7 @@ def test_room_notifications_include_avatar(self): ) self.assertIn("_matrix/media/v1/thumbnail/DUMMY_MEDIA_ID", html) - def test_empty_room(self): + def test_empty_room(self) -> None: """All users leaving a room shouldn't cause the pusher to break.""" # Create a simple room with two users room = self.helper.create_room_as(self.user_id, tok=self.access_token) @@ -309,7 +311,7 @@ def test_empty_room(self): # We should get emailed about that message self._check_for_mail() - def test_empty_room_multiple_messages(self): + def test_empty_room_multiple_messages(self) -> None: """All users leaving a room shouldn't cause the pusher to break.""" # Create a simple room with two users room = self.helper.create_room_as(self.user_id, tok=self.access_token) @@ -329,7 +331,7 @@ def test_empty_room_multiple_messages(self): # We should get emailed about that message self._check_for_mail() - def test_encrypted_message(self): + def test_encrypted_message(self) -> None: room = self.helper.create_room_as(self.user_id, tok=self.access_token) self.helper.invite( room=room, src=self.user_id, tok=self.access_token, targ=self.others[0].id @@ -342,7 +344,7 @@ def test_encrypted_message(self): # We should get emailed about that message self._check_for_mail() - def test_no_email_sent_after_removed(self): + def test_no_email_sent_after_removed(self) -> None: # Create a simple room with two users room = self.helper.create_room_as(self.user_id, tok=self.access_token) self.helper.invite( @@ -379,7 +381,7 @@ def test_no_email_sent_after_removed(self): pushers = list(pushers) self.assertEqual(len(pushers), 0) - def test_remove_unlinked_pushers_background_job(self): + def test_remove_unlinked_pushers_background_job(self) -> None: """Checks that all existing pushers associated with unlinked email addresses are removed upon running the remove_deleted_email_pushers background update. """ diff --git a/tests/push/test_http.py b/tests/push/test_http.py index afaafe79aaf6..23447cc31008 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -46,7 +46,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: m = Mock() - def post_json_get_json(url, body): + def post_json_get_json(url: str, body: JsonDict) -> Deferred: d: Deferred = Deferred() self.push_attempts.append((d, url, body)) return make_deferred_yieldable(d) diff --git a/tests/push/test_presentable_names.py b/tests/push/test_presentable_names.py index aff563919d43..d37f8ce26236 100644 --- a/tests/push/test_presentable_names.py +++ b/tests/push/test_presentable_names.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Iterable, Optional, Tuple +from typing import Iterable, List, Optional, Tuple, cast from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions -from synapse.events import FrozenEvent +from synapse.events import EventBase, FrozenEvent from synapse.push.presentable_names import calculate_room_name from synapse.types import StateKey, StateMap @@ -51,13 +51,15 @@ def __init__(self, events: Iterable[Tuple[StateKey, dict]]): ) async def get_event( - self, event_id: StateKey, allow_none: bool = False + self, event_id: str, allow_none: bool = False ) -> Optional[FrozenEvent]: assert allow_none, "Mock not configured for allow_none = False" - return self._events.get(event_id) + # Decode the state key from the event ID. + state_key = cast(Tuple[str, str], tuple(event_id.split("|", 1))) + return self._events.get(state_key) - async def get_events(self, event_ids: Iterable[StateKey]): + async def get_events(self, event_ids: Iterable[StateKey]) -> StateMap[EventBase]: # This is cheating since it just returns all events. return self._events @@ -68,17 +70,17 @@ class PresentableNamesTestCase(unittest.HomeserverTestCase): def _calculate_room_name( self, - events: StateMap[dict], + events: Iterable[Tuple[Tuple[str, str], dict]], user_id: str = "", fallback_to_members: bool = True, fallback_to_single_member: bool = True, - ): - # This isn't 100% accurate, but works with MockDataStore. - room_state_ids = {k[0]: k[0] for k in events} + ) -> Optional[str]: + # Encode the state key into the event ID. + room_state_ids = {k[0]: "|".join(k[0]) for k in events} return self.get_success( calculate_room_name( - MockDataStore(events), + MockDataStore(events), # type: ignore[arg-type] room_state_ids, user_id or self.USER_ID, fallback_to_members, @@ -86,9 +88,9 @@ def _calculate_room_name( ) ) - def test_name(self): + def test_name(self) -> None: """A room name event should be used.""" - events = [ + events: List[Tuple[Tuple[str, str], dict]] = [ ((EventTypes.Name, ""), {"name": "test-name"}), ] self.assertEqual("test-name", self._calculate_room_name(events)) @@ -100,9 +102,9 @@ def test_name(self): events = [((EventTypes.Name, ""), {"name": 1})] self.assertEqual(1, self._calculate_room_name(events)) - def test_canonical_alias(self): + def test_canonical_alias(self) -> None: """An canonical alias should be used.""" - events = [ + events: List[Tuple[Tuple[str, str], dict]] = [ ((EventTypes.CanonicalAlias, ""), {"alias": "#test-name:test"}), ] self.assertEqual("#test-name:test", self._calculate_room_name(events)) @@ -114,9 +116,9 @@ def test_canonical_alias(self): events = [((EventTypes.CanonicalAlias, ""), {"alias": "test-name"})] self.assertEqual("Empty Room", self._calculate_room_name(events)) - def test_invite(self): + def test_invite(self) -> None: """An invite has special behaviour.""" - events = [ + events: List[Tuple[Tuple[str, str], dict]] = [ ((EventTypes.Member, self.USER_ID), {"membership": Membership.INVITE}), ((EventTypes.Member, self.OTHER_USER_ID), {"displayname": "Other User"}), ] @@ -140,9 +142,9 @@ def test_invite(self): ] self.assertEqual("Room Invite", self._calculate_room_name(events)) - def test_no_members(self): + def test_no_members(self) -> None: """Behaviour of an empty room.""" - events = [] + events: List[Tuple[Tuple[str, str], dict]] = [] self.assertEqual("Empty Room", self._calculate_room_name(events)) # Note that events with invalid (or missing) membership are ignored. 
@@ -152,7 +154,7 @@ def test_no_members(self): ] self.assertEqual("Empty Room", self._calculate_room_name(events)) - def test_no_other_members(self): + def test_no_other_members(self) -> None: """Behaviour of a room with no other members in it.""" events = [ ( @@ -185,7 +187,7 @@ def test_no_other_members(self): self._calculate_room_name(events, user_id=self.OTHER_USER_ID), ) - def test_one_other_member(self): + def test_one_other_member(self) -> None: """Behaviour of a room with a single other member.""" events = [ ((EventTypes.Member, self.USER_ID), {"membership": Membership.JOIN}), @@ -209,7 +211,7 @@ def test_one_other_member(self): ] self.assertEqual("@user:test", self._calculate_room_name(events)) - def test_other_members(self): + def test_other_members(self) -> None: """Behaviour of a room with multiple other members.""" # Two other members. events = [ diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 5ababe6a39a6..1b87756b751a 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, Optional, Union +from typing import Dict, List, Optional, Union, cast import frozendict @@ -30,7 +30,7 @@ from synapse.server import HomeServer from synapse.storage.databases.main.appservice import _make_exclusive_regex from synapse.synapse_rust.push import PushRuleEvaluator -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, JsonMapping, UserID from synapse.util import Clock from tests import unittest @@ -39,7 +39,7 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): def _get_evaluator( - self, content: JsonDict, related_events=None + self, content: JsonMapping, related_events: Optional[JsonDict] = None ) -> PushRuleEvaluator: event = FrozenEvent( { @@ -59,7 +59,7 @@ def _get_evaluator( _flatten_dict(event), room_member_count, sender_power_level, - power_levels.get("notifications", {}), + cast(Dict[str, int], power_levels.get("notifications", {})), {} if related_events is None else related_events, True, event.room_version.msc3931_push_features, @@ -70,9 +70,7 @@ def test_display_name(self) -> None: """Check for a matching display name in the body of the event.""" evaluator = self._get_evaluator({"body": "foo bar baz"}) - condition = { - "kind": "contains_display_name", - } + condition = {"kind": "contains_display_name"} # Blank names are skipped. self.assertFalse(evaluator.matches(condition, "@user:test", "")) @@ -93,7 +91,7 @@ def test_display_name(self) -> None: self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) def _assert_matches( - self, condition: JsonDict, content: JsonDict, msg: Optional[str] = None + self, condition: JsonDict, content: JsonMapping, msg: Optional[str] = None ) -> None: evaluator = self._get_evaluator(content) self.assertTrue(evaluator.matches(condition, "@user:test", "display_name"), msg) @@ -287,7 +285,7 @@ def test_tweaks_for_actions(self) -> None: This tests the behaviour of tweaks_for_actions. 
""" - actions = [ + actions: List[Union[Dict[str, str], str]] = [ {"set_tweak": "sound", "value": "default"}, {"set_tweak": "highlight"}, "notify", @@ -298,7 +296,7 @@ def test_tweaks_for_actions(self) -> None: {"sound": "default", "highlight": True}, ) - def test_related_event_match(self): + def test_related_event_match(self) -> None: evaluator = self._get_evaluator( { "m.relates_to": { @@ -397,7 +395,7 @@ def test_related_event_match(self): ) ) - def test_related_event_match_with_fallback(self): + def test_related_event_match_with_fallback(self) -> None: evaluator = self._get_evaluator( { "m.relates_to": { @@ -469,7 +467,7 @@ def test_related_event_match_with_fallback(self): ) ) - def test_related_event_match_no_related_event(self): + def test_related_event_match_no_related_event(self) -> None: evaluator = self._get_evaluator( {"msgtype": "m.text", "body": "Message without related event"} ) @@ -518,7 +516,9 @@ class TestBulkPushRuleEvaluator(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: # Define an application service so that we can register appservice users self._service_token = "some_token" self._service = ApplicationService( From 5172c8c403d94ea5f184abc8b3c37dbd19a849bc Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 11 Jan 2023 13:21:53 +0000 Subject: [PATCH 007/344] Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state. (#14749) Signed-off-by: Olivier Wilkinson (reivilibre) Co-authored-by: Sean Quah --- changelog.d/14749.misc | 1 + synapse/handlers/message.py | 21 ++++++++++++++++----- 2 files changed, 17 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14749.misc diff --git a/changelog.d/14749.misc b/changelog.d/14749.misc new file mode 100644 index 000000000000..ff813252250d --- /dev/null +++ b/changelog.d/14749.misc @@ -0,0 +1 @@ +Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state. \ No newline at end of file diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 88fc51a4c97e..3278a695ed61 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1531,12 +1531,23 @@ async def cache_joined_hosts_for_events( external federation senders don't have to recalculate it themselves. """ - for event, _ in events_and_context: - if not self._external_cache.is_enabled(): - return + if not self._external_cache.is_enabled(): + return - # If external cache is enabled we should always have this. - assert self._external_cache_joined_hosts_updates is not None + # If external cache is enabled we should always have this. + assert self._external_cache_joined_hosts_updates is not None + + for event, event_context in events_and_context: + if event_context.partial_state: + # To populate the cache for a partial-state event, we either have to + # block until full state, which the code below does, or change the + # meaning of cache values to be the list of hosts to which we plan to + # send events and calculate that instead. + # + # The federation senders don't use the external cache when sending + # events in partial-state rooms anyway, so let's not bother populating + # the cache. 
+ continue # We actually store two mappings, event ID -> prev state group, # state group -> joined hosts, which is much more space efficient From f4d2a734f977c0a88f3a5fcdea38b7f9490481dc Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 11 Jan 2023 15:21:12 +0000 Subject: [PATCH 008/344] Remove outdated commands from the code style doc & point to the contributing guide. (#14773) --- changelog.d/14773.doc | 1 + docs/code_style.md | 15 +++------------ 2 files changed, 4 insertions(+), 12 deletions(-) create mode 100644 changelog.d/14773.doc diff --git a/changelog.d/14773.doc b/changelog.d/14773.doc new file mode 100644 index 000000000000..0992444be07d --- /dev/null +++ b/changelog.d/14773.doc @@ -0,0 +1 @@ +Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. \ No newline at end of file diff --git a/docs/code_style.md b/docs/code_style.md index 3aa7d0d741ba..026001b8a3cf 100644 --- a/docs/code_style.md +++ b/docs/code_style.md @@ -13,23 +13,14 @@ The necessary tools are: - [ruff](https://github.com/charliermarsh/ruff), which can spot common errors; and - [mypy](https://mypy.readthedocs.io/en/stable/), a type checker. -Install them with: - -```sh -pip install -e ".[lint,mypy]" -``` - -The easiest way to run the lints is to invoke the linter script as follows. - -```sh -scripts-dev/lint.sh -``` +See [the contributing guide](development/contributing_guide.md#run-the-linters) for instructions +on how to install the above tools and run the linters. It's worth noting that modern IDEs and text editors can run these tools automatically on save. It may be worth looking into whether this functionality is supported in your editor for a more convenient development workflow. It is not, however, recommended to run `mypy` -on save as they take a while and can be very resource intensive. +on save as it takes a while and can be very resource intensive. ## General rules From 071f8b0f9bd8f06eec4bae5f81f6d759cbdf88f2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 11 Jan 2023 13:36:41 -0500 Subject: [PATCH 009/344] Factor out common code in tests and fix comments. (#14819) --- changelog.d/14819.misc | 1 + stubs/synapse/synapse_rust/push.pyi | 14 ++++ tests/push/test_bulk_push_rule_evaluator.py | 85 ++++++++++++--------- 3 files changed, 64 insertions(+), 36 deletions(-) create mode 100644 changelog.d/14819.misc diff --git a/changelog.d/14819.misc b/changelog.d/14819.misc new file mode 100644 index 000000000000..9c568dbc9cf7 --- /dev/null +++ b/changelog.d/14819.misc @@ -0,0 +1 @@ +Refactor push tests. diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 67b2fb99abbd..b91f2edd7b0f 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -1,3 +1,17 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union from synapse.types import JsonDict diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 1cd453248ec2..9c17a42b650f 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -1,10 +1,28 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + from unittest.mock import patch +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.room_versions import RoomVersions from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator from synapse.rest import admin from synapse.rest.client import login, register, room +from synapse.server import HomeServer from synapse.types import create_requester +from synapse.util import Clock from tests.test_utils import simple_async_mock from tests.unittest import HomeserverTestCase, override_config @@ -19,6 +37,20 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): register.register_servlets, ] + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + # Create a new user and room. + self.alice = self.register_user("alice", "pass") + self.token = self.login(self.alice, "pass") + self.requester = create_requester(self.alice) + + self.room_id = self.helper.create_room_as( + self.alice, room_version=RoomVersions.V9.identifier, tok=self.token + ) + + self.event_creation_handler = self.hs.get_event_creation_handler() + def test_action_for_event_by_user_handles_noninteger_power_levels(self) -> None: """We should convert floats and strings to integers before passing to Rust. @@ -26,46 +58,37 @@ def test_action_for_event_by_user_handles_noninteger_power_levels(self) -> None: A lack of validation: the gift that keeps on giving. """ - # Create a new user and room. - alice = self.register_user("alice", "pass") - token = self.login(alice, "pass") - - room_id = self.helper.create_room_as( - alice, room_version=RoomVersions.V9.identifier, tok=token - ) # Alter the power levels in that room to include stringy and floaty levels. # We need to suppress the validation logic or else it will reject these dodgy # values. (Presumably this validation was not always present.) - event_creation_handler = self.hs.get_event_creation_handler() - requester = create_requester(alice) with patch("synapse.events.validator.validate_canonicaljson"), patch( "synapse.events.validator.jsonschema.validate" ): self.helper.send_state( - room_id, + self.room_id, "m.room.power_levels", { - "users": {alice: "100"}, # stringy + "users": {self.alice: "100"}, # stringy "notifications": {"room": 100.0}, # float }, - token, + self.token, state_key="", ) # Create a new message event, and try to evaluate it under the dodgy # power level event. 
event, context = self.get_success( - event_creation_handler.create_event( - requester, + self.event_creation_handler.create_event( + self.requester, { "type": "m.room.message", - "room_id": room_id, + "room_id": self.room_id, "content": { "msgtype": "m.text", "body": "helo", }, - "sender": alice, + "sender": self.alice, }, ) ) @@ -77,39 +100,29 @@ def test_action_for_event_by_user_handles_noninteger_power_levels(self) -> None: @override_config({"push": {"enabled": False}}) def test_action_for_event_by_user_disabled_by_config(self) -> None: """Ensure that push rules are not calculated when disabled in the config""" - # Create a new user and room. - alice = self.register_user("alice", "pass") - token = self.login(alice, "pass") - room_id = self.helper.create_room_as( - alice, room_version=RoomVersions.V9.identifier, tok=token - ) - - # Alter the power levels in that room to include stringy and floaty levels. - # We need to suppress the validation logic or else it will reject these dodgy - # values. (Presumably this validation was not always present.) - event_creation_handler = self.hs.get_event_creation_handler() - requester = create_requester(alice) - - # Create a new message event, and try to evaluate it under the dodgy - # power level event. + # Create a new message event which should cause a notification. event, context = self.get_success( - event_creation_handler.create_event( - requester, + self.event_creation_handler.create_event( + self.requester, { "type": "m.room.message", - "room_id": room_id, + "room_id": self.room_id, "content": { "msgtype": "m.text", "body": "helo", }, - "sender": alice, + "sender": self.alice, }, ) ) bulk_evaluator = BulkPushRuleEvaluator(self.hs) + # Mock the method which calculates push rules -- we do this instead of + # e.g. checking the results in the database because we want to ensure + # that code isn't even running. bulk_evaluator._action_for_event_by_user = simple_async_mock() # type: ignore[assignment] - # should not raise + + # Ensure no actions are generated! self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) bulk_evaluator._action_for_event_by_user.assert_not_called() From dd9e71dc7fa91b81adfaaf8669aaf7ee976ffcd7 Mon Sep 17 00:00:00 2001 From: Emelie Graven Date: Wed, 11 Jan 2023 19:41:52 +0100 Subject: [PATCH 010/344] Add `set_displayname` to the module API (#14629) --- changelog.d/14629.feature | 1 + synapse/module_api/__init__.py | 27 +++++++++++++++++++++++++++ tests/module_api/test_api.py | 18 ++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 changelog.d/14629.feature diff --git a/changelog.d/14629.feature b/changelog.d/14629.feature new file mode 100644 index 000000000000..78f5fc24035f --- /dev/null +++ b/changelog.d/14629.feature @@ -0,0 +1 @@ +Adds a `set_displayname()` method to the module API for setting a user's display name. diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 6f4a934b0509..6153a48257b3 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1585,6 +1585,33 @@ async def create_room( return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None) + async def set_displayname( + self, + user_id: UserID, + new_displayname: str, + deactivation: bool = False, + ) -> None: + """Sets a user's display name. + + Added in Synapse v1.76.0. + + Args: + user_id: + The user whose display name is to be changed. + new_displayname: + The new display name to give the user. 
+ deactivation: + Whether this change was made while deactivating the user. + """ + requester = create_requester(user_id) + await self._hs.get_profile_handler().set_displayname( + target_user=user_id, + requester=requester, + new_displayname=new_displayname, + by_admin=True, + deactivation=deactivation, + ) + class PublicRoomListManager: """Contains methods for adding to, removing from and querying whether a room diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index b0f3f4374da8..9919938e8071 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -110,6 +110,24 @@ def test_can_set_admin(self): self.assertEqual(found_user.user_id.to_string(), user_id) self.assertIdentical(found_user.is_admin, True) + def test_can_set_displayname(self): + localpart = "alice_wants_a_new_displayname" + user_id = self.register_user( + localpart, "1234", displayname="Alice", admin=False + ) + found_userinfo = self.get_success(self.module_api.get_userinfo_by_id(user_id)) + + self.get_success( + self.module_api.set_displayname( + found_userinfo.user_id, "Bob", deactivation=False + ) + ) + found_profile = self.get_success( + self.module_api.get_profile_for_user(localpart) + ) + + self.assertEqual(found_profile.display_name, "Bob") + def test_get_userinfo_by_id(self): user_id = self.register_user("alice", "1234") found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id)) From b50c008453001aee8dd7dbd6f36ec32039e6ce76 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 12 Jan 2023 10:52:07 +0000 Subject: [PATCH 011/344] Re-enable some linting (#14821) * Re-enable some linting * Newsfile * Remove comment --- changelog.d/14821.misc | 1 + pyproject.toml | 8 -------- stubs/sortedcontainers/sortedlist.pyi | 1 - stubs/sortedcontainers/sortedset.pyi | 2 -- stubs/synapse/synapse_rust/push.pyi | 2 +- synapse/config/_base.pyi | 10 ++++------ tests/storage/test_event_push_actions.py | 6 +++--- 7 files changed, 9 insertions(+), 21 deletions(-) create mode 100644 changelog.d/14821.misc diff --git a/changelog.d/14821.misc b/changelog.d/14821.misc new file mode 100644 index 000000000000..99e4e5e8a12e --- /dev/null +++ b/changelog.d/14821.misc @@ -0,0 +1 @@ +Re-enable some linting that was disabled when we switched to ruff. diff --git a/pyproject.toml b/pyproject.toml index 740d33066e1c..10d50ddb45f7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -48,11 +48,6 @@ line-length = 88 # E731: do not assign a lambda expression, use a def # E501: Line too long (black enforces this for us) # -# See https://github.com/charliermarsh/ruff/#pyflakes -# F401: unused import -# F811: Redefinition of unused -# F821: Undefined name -# # flake8-bugbear compatible checks. Its error codes are described at # https://github.com/charliermarsh/ruff/#flake8-bugbear # B019: Use of functools.lru_cache or functools.cache on methods can lead to memory leaks @@ -64,9 +59,6 @@ ignore = [ "B024", "E501", "E731", - "F401", - "F811", - "F821", ] select = [ # pycodestyle checks. 
diff --git a/stubs/sortedcontainers/sortedlist.pyi b/stubs/sortedcontainers/sortedlist.pyi index cd4c969849b1..1fe1a136f190 100644 --- a/stubs/sortedcontainers/sortedlist.pyi +++ b/stubs/sortedcontainers/sortedlist.pyi @@ -7,7 +7,6 @@ from __future__ import annotations from typing import ( Any, Callable, - Generic, Iterable, Iterator, List, diff --git a/stubs/sortedcontainers/sortedset.pyi b/stubs/sortedcontainers/sortedset.pyi index d761c438f792..6db11eacbed9 100644 --- a/stubs/sortedcontainers/sortedset.pyi +++ b/stubs/sortedcontainers/sortedset.pyi @@ -5,10 +5,8 @@ from __future__ import annotations from typing import ( - AbstractSet, Any, Callable, - Generic, Hashable, Iterable, Iterator, diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index b91f2edd7b0f..373b40740b37 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union +from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union from synapse.types import JsonDict diff --git a/synapse/config/_base.pyi b/synapse/config/_base.pyi index bd265de53613..b5cec132b4c1 100644 --- a/synapse/config/_base.pyi +++ b/synapse/config/_base.pyi @@ -1,5 +1,3 @@ -from __future__ import annotations - import argparse from typing import ( Any, @@ -20,7 +18,7 @@ from typing import ( import jinja2 -from synapse.config import ( +from synapse.config import ( # noqa: F401 account_validity, api, appservice, @@ -169,7 +167,7 @@ class RootConfig: self, section_name: Literal["caches"] ) -> cache.CacheConfig: ... @overload - def reload_config_section(self, section_name: str) -> Config: ... + def reload_config_section(self, section_name: str) -> "Config": ... class Config: root: RootConfig @@ -202,9 +200,9 @@ def find_config_files(search_paths: List[str]) -> List[str]: ... class ShardedWorkerHandlingConfig: instances: List[str] def __init__(self, instances: List[str]) -> None: ... - def should_handle(self, instance_name: str, key: str) -> bool: ... + def should_handle(self, instance_name: str, key: str) -> bool: ... # noqa: F811 class RoutableShardedWorkerHandlingConfig(ShardedWorkerHandlingConfig): - def get_instance(self, key: str) -> str: ... + def get_instance(self, key: str) -> str: ... # noqa: F811 def read_file(file_path: Any, config_path: Iterable[str]) -> str: ... diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 5fa8bd2d98ce..76c06a9d1e72 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -154,7 +154,7 @@ def test_count_aggregation(self) -> None: # Create a user to receive notifications and send receipts. 
user_id, token, _, other_token, room_id = self._create_users_and_room() - last_event_id: str + last_event_id = "" def _assert_counts(notif_count: int, highlight_count: int) -> None: counts = self.get_success( @@ -289,7 +289,7 @@ def test_count_aggregation_threads(self) -> None: user_id, token, _, other_token, room_id = self._create_users_and_room() thread_id: str - last_event_id: str + last_event_id = "" def _assert_counts( notif_count: int, @@ -471,7 +471,7 @@ def test_count_aggregation_mixed(self) -> None: user_id, token, _, other_token, room_id = self._create_users_and_room() thread_id: str - last_event_id: str + last_event_id = "" def _assert_counts( notif_count: int, From f5ea9f2b1d9e3e3def783b7b61760f2d859ba978 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 12 Jan 2023 16:20:34 +0000 Subject: [PATCH 012/344] Add rust linting commands to `scripts-dev/lint.sh` (#14822) --- changelog.d/14822.misc | 1 + scripts-dev/lint.sh | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 changelog.d/14822.misc diff --git a/changelog.d/14822.misc b/changelog.d/14822.misc new file mode 100644 index 000000000000..5e02cc84885d --- /dev/null +++ b/changelog.d/14822.misc @@ -0,0 +1 @@ +Add `cargo fmt` and `cargo clippy` to the lint script. \ No newline at end of file diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 2bf58ac5d4a0..392c509a8ac1 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -101,10 +101,43 @@ echo # Print out the commands being run set -x +# Ensure the sort order of imports. isort "${files[@]}" + +# Ensure Python code conforms to an opinionated style. python3 -m black "${files[@]}" + +# Ensure the sample configuration file conforms to style checks. ./scripts-dev/config-lint.sh + +# Catch any common programming mistakes in Python code. # --quiet suppresses the update check. ruff --quiet "${files[@]}" + +# Catch any common programming mistakes in Rust code. +# +# --bins, --examples, --lib, --tests combined explicitly disable checking +# the benchmarks, which can fail due to `#![feature]` macros not being +# allowed on the stable rust toolchain (rustc error E0554). +# +# --allow-staged and --allow-dirty suppress clippy raising errors +# for uncommitted files. Only needed when using --fix. +# +# -D warnings disables the "warnings" lint. +# +# Using --fix has a tendency to cause subsequent runs of clippy to recompile +# rust code, which can slow down this script. Thus we run clippy without --fix +# first which is quick, and then re-run it with --fix if an error was found. +if ! cargo-clippy --bins --examples --lib --tests -- -D warnings > /dev/null 2>&1; then + cargo-clippy \ + --bins --examples --lib --tests --allow-staged --allow-dirty --fix -- -D warnings +fi + +# Ensure the formatting of Rust code. +cargo-fmt + +# Ensure all Pydantic models use strict types. ./scripts-dev/check_pydantic_models.py lint + +# Ensure type hints are correct. mypy From 772e8c23856e27960caba4dd87af42401b6c0cac Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 13 Jan 2023 00:16:21 +0000 Subject: [PATCH 013/344] Fix stack overflow in `_PerHostRatelimiter` due to synchronous requests (#14812) When there are many synchronous requests waiting on a `_PerHostRatelimiter`, each request will be started recursively just after the previous request has completed. Under the right conditions, this leads to stack exhaustion. 
A common way for requests to become synchronous is when the remote client disconnects early, because the homeserver is overloaded and slow to respond. Avoid stack exhaustion under these conditions by deferring subsequent requests until the next reactor tick. Fixes #14480. Signed-off-by: Sean Quah --- changelog.d/14812.bugfix | 1 + synapse/rest/client/register.py | 1 + synapse/server.py | 1 + synapse/util/ratelimitutils.py | 34 ++++++++++++++++------- tests/util/test_ratelimitutils.py | 45 ++++++++++++++++++++++++++++--- 5 files changed, 70 insertions(+), 12 deletions(-) create mode 100644 changelog.d/14812.bugfix diff --git a/changelog.d/14812.bugfix b/changelog.d/14812.bugfix new file mode 100644 index 000000000000..94e0d70cbcc4 --- /dev/null +++ b/changelog.d/14812.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconencted early. diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 3cb1e7e37501..be696c304bb9 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -310,6 +310,7 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.registration_handler = hs.get_registration_handler() self.ratelimiter = FederationRateLimiter( + hs.get_reactor(), hs.get_clock(), FederationRatelimitSettings( # Time window of 2s diff --git a/synapse/server.py b/synapse/server.py index f4ab94c4f33c..c8752baa5ae0 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -768,6 +768,7 @@ def get_replication_streams(self) -> Dict[str, Stream]: @cache_in_self def get_federation_ratelimiter(self) -> FederationRateLimiter: return FederationRateLimiter( + self.get_reactor(), self.get_clock(), config=self.config.ratelimiting.rc_federation, metrics_name="federation_servlets", diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 2aceb1a47fb6..bd72947bfed7 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -34,6 +34,7 @@ from typing_extensions import ContextManager from twisted.internet import defer +from twisted.internet.interfaces import IReactorTime from synapse.api.errors import LimitExceededError from synapse.config.ratelimiting import FederationRatelimitSettings @@ -146,12 +147,14 @@ class FederationRateLimiter: def __init__( self, + reactor: IReactorTime, clock: Clock, config: FederationRatelimitSettings, metrics_name: Optional[str] = None, ): """ Args: + reactor clock config metrics_name: The name of the rate limiter so we can differentiate it @@ -163,7 +166,7 @@ def __init__( def new_limiter() -> "_PerHostRatelimiter": return _PerHostRatelimiter( - clock=clock, config=config, metrics_name=metrics_name + reactor=reactor, clock=clock, config=config, metrics_name=metrics_name ) self.ratelimiters: DefaultDict[ @@ -194,12 +197,14 @@ def ratelimit(self, host: str) -> "_GeneratorContextManager[defer.Deferred[None] class _PerHostRatelimiter: def __init__( self, + reactor: IReactorTime, clock: Clock, config: FederationRatelimitSettings, metrics_name: Optional[str] = None, ): """ Args: + reactor clock config metrics_name: The name of the rate limiter so we can differentiate it @@ -207,6 +212,7 @@ def __init__( for this rate limiter. 
from the rest in the metrics """ + self.reactor = reactor self.clock = clock self.metrics_name = metrics_name @@ -364,12 +370,22 @@ def on_both(r: object) -> object: def _on_exit(self, request_id: object) -> None: logger.debug("Ratelimit(%s) [%s]: Processed req", self.host, id(request_id)) - self.current_processing.discard(request_id) - try: - # start processing the next item on the queue. - _, deferred = self.ready_request_queue.popitem(last=False) - with PreserveLoggingContext(): - deferred.callback(None) - except KeyError: - pass + # When requests complete synchronously, we will recursively start the next + # request in the queue. To avoid stack exhaustion, we defer starting the next + # request until the next reactor tick. + + def start_next_request() -> None: + # We only remove the completed request from the list when we're about to + # start the next one, otherwise we can allow extra requests through. + self.current_processing.discard(request_id) + try: + # start processing the next item on the queue. + _, deferred = self.ready_request_queue.popitem(last=False) + + with PreserveLoggingContext(): + deferred.callback(None) + except KeyError: + pass + + self.reactor.callLater(0.0, start_next_request) diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py index 5b327b390ef2..2f3ea15b965f 100644 --- a/tests/util/test_ratelimitutils.py +++ b/tests/util/test_ratelimitutils.py @@ -13,6 +13,7 @@ # limitations under the License. from typing import Optional +from twisted.internet import defer from twisted.internet.defer import Deferred from synapse.config.homeserver import HomeServerConfig @@ -29,7 +30,7 @@ def test_ratelimit(self) -> None: """A simple test with the default values""" reactor, clock = get_clock() rc_config = build_rc_config() - ratelimiter = FederationRateLimiter(clock, rc_config) + ratelimiter = FederationRateLimiter(reactor, clock, rc_config) with ratelimiter.ratelimit("testhost") as d1: # shouldn't block @@ -39,7 +40,7 @@ def test_concurrent_limit(self) -> None: """Test what happens when we hit the concurrent limit""" reactor, clock = get_clock() rc_config = build_rc_config({"rc_federation": {"concurrent": 2}}) - ratelimiter = FederationRateLimiter(clock, rc_config) + ratelimiter = FederationRateLimiter(reactor, clock, rc_config) with ratelimiter.ratelimit("testhost") as d1: # shouldn't block @@ -57,6 +58,7 @@ def test_concurrent_limit(self) -> None: # ... until we complete an earlier request cm2.__exit__(None, None, None) + reactor.advance(0.0) self.successResultOf(d3) def test_sleep_limit(self) -> None: @@ -65,7 +67,7 @@ def test_sleep_limit(self) -> None: rc_config = build_rc_config( {"rc_federation": {"sleep_limit": 2, "sleep_delay": 500}} ) - ratelimiter = FederationRateLimiter(clock, rc_config) + ratelimiter = FederationRateLimiter(reactor, clock, rc_config) with ratelimiter.ratelimit("testhost") as d1: # shouldn't block @@ -81,6 +83,43 @@ def test_sleep_limit(self) -> None: sleep_time = _await_resolution(reactor, d3) self.assertAlmostEqual(sleep_time, 500, places=3) + def test_lots_of_queued_things(self) -> None: + """Tests lots of synchronous things queued up behind a slow thing. + + The stack should *not* explode when the slow thing completes. 
+ """ + reactor, clock = get_clock() + rc_config = build_rc_config( + { + "rc_federation": { + "sleep_limit": 1000000000, # never sleep + "reject_limit": 1000000000, # never reject requests + "concurrent": 1, + } + } + ) + ratelimiter = FederationRateLimiter(reactor, clock, rc_config) + + with ratelimiter.ratelimit("testhost") as d: + # shouldn't block + self.successResultOf(d) + + async def task() -> None: + with ratelimiter.ratelimit("testhost") as d: + await d + + for _ in range(1, 100): + defer.ensureDeferred(task()) + + last_task = defer.ensureDeferred(task()) + + # Upon exiting the context manager, all the synchronous things will resume. + # If a stack overflow occurs, the final task will not complete. + + # Wait for all the things to complete. + reactor.advance(0.0) + self.successResultOf(last_task) + def _await_resolution(reactor: ThreadedMemoryReactorClock, d: Deferred) -> float: """advance the clock until the deferred completes. From 3a125625e70634075cc4d965a01309af56748eb2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 13 Jan 2023 12:37:28 +0000 Subject: [PATCH 014/344] Add some clarifying comments and refactor a portion of the `Keyring` class for readability (#14804) --- changelog.d/14804.misc | 1 + synapse/crypto/keyring.py | 61 +++++++++++++++++++++++++++------------ 2 files changed, 44 insertions(+), 18 deletions(-) create mode 100644 changelog.d/14804.misc diff --git a/changelog.d/14804.misc b/changelog.d/14804.misc new file mode 100644 index 000000000000..24302332bd64 --- /dev/null +++ b/changelog.d/14804.misc @@ -0,0 +1 @@ +Add some clarifying comments and refactor a portion of the `Keyring` class for readability. \ No newline at end of file diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 69310d90351c..86cd4af9bd5a 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -154,17 +154,21 @@ def __init__( if key_fetchers is None: key_fetchers = ( + # Fetch keys from the database. StoreKeyFetcher(hs), + # Fetch keys from a configured Perspectives server. PerspectivesKeyFetcher(hs), + # Fetch keys from the origin server directly. ServerKeyFetcher(hs), ) self._key_fetchers = key_fetchers - self._server_queue: BatchingQueue[ + self._fetch_keys_queue: BatchingQueue[ _FetchKeyRequest, Dict[str, Dict[str, FetchKeyResult]] ] = BatchingQueue( "keyring_server", clock=hs.get_clock(), + # The method called to fetch each key process_batch_callback=self._inner_fetch_key_requests, ) @@ -287,7 +291,7 @@ async def process_request(self, verify_request: VerifyJsonRequest) -> None: minimum_valid_until_ts=verify_request.minimum_valid_until_ts, key_ids=list(key_ids_to_find), ) - found_keys_by_server = await self._server_queue.add_to_queue( + found_keys_by_server = await self._fetch_keys_queue.add_to_queue( key_request, key=verify_request.server_name ) @@ -352,7 +356,17 @@ async def _process_json( async def _inner_fetch_key_requests( self, requests: List[_FetchKeyRequest] ) -> Dict[str, Dict[str, FetchKeyResult]]: - """Processing function for the queue of `_FetchKeyRequest`.""" + """Processing function for the queue of `_FetchKeyRequest`. + + Takes a list of key fetch requests, de-duplicates them and then carries out + each request by invoking self._inner_fetch_key_request. + + Args: + requests: A list of requests for homeserver verify keys. 
+ + Returns: + {server name: {key id: fetch key result}} + """ logger.debug("Starting fetch for %s", requests) @@ -397,8 +411,23 @@ async def _inner_fetch_key_requests( async def _inner_fetch_key_request( self, verify_request: _FetchKeyRequest ) -> Dict[str, FetchKeyResult]: - """Attempt to fetch the given key by calling each key fetcher one by - one. + """Attempt to fetch the given key by calling each key fetcher one by one. + + If a key is found, check whether its `valid_until_ts` attribute satisfies the + `minimum_valid_until_ts` attribute of the `verify_request`. If it does, we + refrain from asking subsequent fetchers for that key. + + Even if the above check fails, we still return the found key - the caller may + still find the invalid key result useful. In this case, we continue to ask + subsequent fetchers for the invalid key, in case they return a valid result + for it. This can happen when fetching a stale key result from the database, + before querying the origin server for an up-to-date result. + + Args: + verify_request: The request for a verify key. Can include multiple key IDs. + + Returns: + A map of {key_id: the key fetch result}. """ logger.debug("Starting fetch for %s", verify_request) @@ -420,26 +449,22 @@ async def _inner_fetch_key_request( if not key: continue - # If we already have a result for the given key ID we keep the + # If we already have a result for the given key ID, we keep the # one with the highest `valid_until_ts`. existing_key = found_keys.get(key_id) - if existing_key: - if key.valid_until_ts <= existing_key.valid_until_ts: - continue + if existing_key and existing_key.valid_until_ts > key.valid_until_ts: + continue + + # Check if this key's expiry timestamp is valid for the verify request. + if key.valid_until_ts >= verify_request.minimum_valid_until_ts: + # Stop looking for this key from subsequent fetchers. + missing_key_ids.discard(key_id) - # We always store the returned key even if it doesn't the + # We always store the returned key even if it doesn't meet the # `minimum_valid_until_ts` requirement, as some verification # requests may still be able to be satisfied by it. - # - # We still keep looking for the key from other fetchers in that - # case though. found_keys[key_id] = key - if key.valid_until_ts < verify_request.minimum_valid_until_ts: - continue - - missing_key_ids.discard(key_id) - return found_keys From d344bc8b6ea14210614845f4e0af776013f86459 Mon Sep 17 00:00:00 2001 From: villepeh <100730729+villepeh@users.noreply.github.com> Date: Fri, 13 Jan 2023 16:06:58 +0200 Subject: [PATCH 015/344] Include `x_forwarded` in workers example configs (#14667) --- changelog.d/14667.doc | 1 + .../create-multiple-generic-workers.md | 6 +++--- .../create-multiple-stream-writers.md | 10 ++++++++-- docs/systemd-with-workers/workers/event_persister.yaml | 1 + docs/systemd-with-workers/workers/generic_worker.yaml | 3 +-- docs/systemd-with-workers/workers/media_worker.yaml | 1 + 6 files changed, 15 insertions(+), 7 deletions(-) create mode 100644 changelog.d/14667.doc diff --git a/changelog.d/14667.doc b/changelog.d/14667.doc new file mode 100644 index 000000000000..86d6288121da --- /dev/null +++ b/changelog.d/14667.doc @@ -0,0 +1 @@ +Include `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. 
diff --git a/contrib/workers-bash-scripts/create-multiple-generic-workers.md b/contrib/workers-bash-scripts/create-multiple-generic-workers.md index c9be707b3c6e..63d0038a7d8a 100644 --- a/contrib/workers-bash-scripts/create-multiple-generic-workers.md +++ b/contrib/workers-bash-scripts/create-multiple-generic-workers.md @@ -15,19 +15,19 @@ worker_name: generic_worker$i worker_replication_host: 127.0.0.1 worker_replication_http_port: 9093 -worker_main_http_uri: http://localhost:8008/ - worker_listeners: - type: http port: 808$i + x_forwarded: true resources: - names: [client, federation] worker_log_config: /etc/matrix-synapse/generic-worker-log.yaml +#worker_pid_file: DATADIR/generic_worker$i.pid EOF done ``` This would create five generic workers with a unique `worker_name` field in each file and listening on ports 8081-8085. -Customise the script to your needs. +Customise the script to your needs. Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed. diff --git a/contrib/workers-bash-scripts/create-multiple-stream-writers.md b/contrib/workers-bash-scripts/create-multiple-stream-writers.md index 0d2ca780a6a3..efa5dea3050a 100644 --- a/contrib/workers-bash-scripts/create-multiple-stream-writers.md +++ b/contrib/workers-bash-scripts/create-multiple-stream-writers.md @@ -8,7 +8,9 @@ It also prints out the example lines for Synapse main configuration file. Remember to route necessary endpoints directly to a worker associated with it. -If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array. +If you run the script as-is, it will create workers with the replication listener starting from port 8034 and another, regular http listener starting from 8044. If you don't need all of the stream writers listed in the script, just remove them from the ```STREAM_WRITERS``` array. + +Hint: Note that `worker_pid_file` is required if `worker_daemonize` is `true`. Uncomment and/or modify the line if needed. ```sh #!/bin/bash @@ -46,9 +48,11 @@ worker_listeners: - type: http port: $(expr $HTTP_START_PORT + $i) + x_forwarded: true resources: - names: [client] +#worker_pid_file: DATADIR/${STREAM_WRITERS[$i]}.pid worker_log_config: /etc/matrix-synapse/stream-writer-log.yaml EOF HOMESERVER_YAML_INSTANCE_MAP+=$" ${STREAM_WRITERS[$i]}_stream_writer: @@ -91,7 +95,9 @@ Simply run the script to create YAML files in the current folder and print out t ```console $ ./create_stream_writers.sh - +``` +You should receive an output similar to the following: +```console # Add these lines to your homeserver.yaml. # Don't forget to configure your reverse proxy and # necessary endpoints to their respective worker. 
diff --git a/docs/systemd-with-workers/workers/event_persister.yaml b/docs/systemd-with-workers/workers/event_persister.yaml index 9bc6997bad99..c11d5897b18e 100644 --- a/docs/systemd-with-workers/workers/event_persister.yaml +++ b/docs/systemd-with-workers/workers/event_persister.yaml @@ -17,6 +17,7 @@ worker_listeners: # #- type: http # port: 8035 + # x_forwarded: true # resources: # - names: [client] diff --git a/docs/systemd-with-workers/workers/generic_worker.yaml b/docs/systemd-with-workers/workers/generic_worker.yaml index 6e7b60886e72..a858f99ed1d9 100644 --- a/docs/systemd-with-workers/workers/generic_worker.yaml +++ b/docs/systemd-with-workers/workers/generic_worker.yaml @@ -5,11 +5,10 @@ worker_name: generic_worker1 worker_replication_host: 127.0.0.1 worker_replication_http_port: 9093 -worker_main_http_uri: http://localhost:8008/ - worker_listeners: - type: http port: 8083 + x_forwarded: true resources: - names: [client, federation] diff --git a/docs/systemd-with-workers/workers/media_worker.yaml b/docs/systemd-with-workers/workers/media_worker.yaml index eb34d1249231..8ad046f11a5b 100644 --- a/docs/systemd-with-workers/workers/media_worker.yaml +++ b/docs/systemd-with-workers/workers/media_worker.yaml @@ -8,6 +8,7 @@ worker_replication_http_port: 9093 worker_listeners: - type: http port: 8085 + x_forwarded: true resources: - names: [media] From 1caf16a4509fef337b49a8c9d08cdc4494447d7d Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 13 Jan 2023 15:14:39 +0100 Subject: [PATCH 016/344] Add `worker_manhole` to configuration manual (#14824) Closes: #13643 --- changelog.d/14824.doc | 1 + .../configuration/config_documentation.md | 21 +++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 changelog.d/14824.doc diff --git a/changelog.d/14824.doc b/changelog.d/14824.doc new file mode 100644 index 000000000000..172d37baf251 --- /dev/null +++ b/changelog.d/14824.doc @@ -0,0 +1 @@ +Add `worker_manhole` to configuration manual. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 294dd6edddcd..35de9c95e345 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -4029,6 +4029,27 @@ worker_listeners: resources: - names: [client, federation] ``` +--- +### `worker_manhole` + +A worker may have a listener for [`manhole`](../../manhole.md). +It allows server administrators to access a Python shell on the worker. + +Example configuration: +```yaml +worker_manhole: 9000 +``` + +This is a short form for: +```yaml +worker_listeners: + - port: 9000 + bind_addresses: ['127.0.0.1'] + type: manhole +``` + +It needs also an additional [`manhole_settings`](#manhole_settings) configuration. 
+ --- ### `worker_daemonize` From 8d5325ec0c04c3b0f08e0c5b4a26c5939d9db7f1 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 13 Jan 2023 15:17:03 +0100 Subject: [PATCH 017/344] Drop unused table `presence` (#14825) --- changelog.d/14825.misc | 1 + scripts-dev/database-save.sh | 1 - .../schema/main/delta/73/25drop_presence.sql | 17 +++++++++++++++++ 3 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14825.misc create mode 100644 synapse/storage/schema/main/delta/73/25drop_presence.sql diff --git a/changelog.d/14825.misc b/changelog.d/14825.misc new file mode 100644 index 000000000000..64312ac09e2a --- /dev/null +++ b/changelog.d/14825.misc @@ -0,0 +1 @@ +Drop unused table `presence`. \ No newline at end of file diff --git a/scripts-dev/database-save.sh b/scripts-dev/database-save.sh index 040c8a494319..91674027ae29 100755 --- a/scripts-dev/database-save.sh +++ b/scripts-dev/database-save.sh @@ -11,6 +11,5 @@ sqlite3 "$1" <<'EOF' >table-save.sql .dump users .dump access_tokens -.dump presence .dump profiles EOF diff --git a/synapse/storage/schema/main/delta/73/25drop_presence.sql b/synapse/storage/schema/main/delta/73/25drop_presence.sql new file mode 100644 index 000000000000..9f6ffa20b613 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/25drop_presence.sql @@ -0,0 +1,17 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- this table is unused +DROP TABLE presence; From 1416096527be9fc7edf29a087515f0ac62ce8419 Mon Sep 17 00:00:00 2001 From: Tejaswini Gurram <47633397+tjay27@users.noreply.github.com> Date: Fri, 13 Jan 2023 20:16:21 +0530 Subject: [PATCH 018/344] Update misleading documentation ` user_directory.search_all_users ` (#14818) Fixes #13852 --- changelog.d/14818.doc | 1 + docs/usage/configuration/config_documentation.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14818.doc diff --git a/changelog.d/14818.doc b/changelog.d/14818.doc new file mode 100644 index 000000000000..7a47cc8ab35e --- /dev/null +++ b/changelog.d/14818.doc @@ -0,0 +1 @@ +Updated documentation in configuration manual for `user_directory.search_all_users`. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 35de9c95e345..3481e866f705 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3472,8 +3472,8 @@ This setting defines options related to the user directory. This option has the following sub-options: * `enabled`: Defines whether users can search the user directory. If false then empty responses are returned to all queries. Defaults to true. -* `search_all_users`: Defines whether to search all users visible to your HS when searching - the user directory. 
If false, search results will only contain users +* `search_all_users`: Defines whether to search all users visible to your HS at the time the search is performed. If set to true, will return all users who share a room with the user from the homeserver. + If false, search results will only contain users visible in public rooms and users sharing a room with the requester. Defaults to false. From 73ff493dfba63541a09eaf08587eb8bbd3330967 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 13 Jan 2023 14:57:43 +0000 Subject: [PATCH 019/344] Merge account data streams (#14826) --- changelog.d/14826.misc | 1 + docs/upgrade.md | 12 +++++ synapse/api/constants.py | 1 + synapse/handlers/account_data.py | 7 ++- synapse/handlers/initial_sync.py | 8 +-- synapse/handlers/sync.py | 11 +++- synapse/replication/tcp/client.py | 3 +- synapse/replication/tcp/handler.py | 3 +- synapse/replication/tcp/streams/__init__.py | 3 -- synapse/replication/tcp/streams/_base.py | 49 +++++++++-------- .../storage/databases/main/account_data.py | 6 +-- synapse/storage/databases/main/tags.py | 54 +++++-------------- 12 files changed, 75 insertions(+), 83 deletions(-) create mode 100644 changelog.d/14826.misc diff --git a/changelog.d/14826.misc b/changelog.d/14826.misc new file mode 100644 index 000000000000..9ebedcf51e7c --- /dev/null +++ b/changelog.d/14826.misc @@ -0,0 +1 @@ +Merge tag and normal account data replication streams. diff --git a/docs/upgrade.md b/docs/upgrade.md index c4bc5889a95f..8a76172e43cc 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,18 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.76.0 + +## Changes to the account data replication streams + +Synapse has changed the format of the account data replication streams (between +workers). This is a forwards- and backwards-incompatible change: v1.75 workers +cannot process account data replicated by v1.76 workers, and vice versa. + +Once all workers are upgraded to v1.76 (or downgraded to v1.75), account data +replication will resume as normal. 
+ + # Upgrading to v1.74.0 ## Unicode support in user search diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 6a5e7171da90..6432d32d8376 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -249,6 +249,7 @@ class RoomEncryptionAlgorithms: class AccountDataTypes: DIRECT: Final = "m.direct" IGNORED_USER_LIST: Final = "m.ignored_user_list" + TAG: Final = "m.tag" class HistoryVisibility: diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index aba7315cf730..834006356a28 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -16,6 +16,7 @@ import random from typing import TYPE_CHECKING, Awaitable, Callable, Collection, List, Optional, Tuple +from synapse.api.constants import AccountDataTypes from synapse.replication.http.account_data import ( ReplicationAddRoomAccountDataRestServlet, ReplicationAddTagRestServlet, @@ -335,7 +336,11 @@ async def get_new_events( for room_id, room_tags in tags.items(): results.append( - {"type": "m.tag", "content": {"tags": room_tags}, "room_id": room_id} + { + "type": AccountDataTypes.TAG, + "content": {"tags": room_tags}, + "room_id": room_id, + } ) ( diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 9c335e6863f4..8c2260ad7dfa 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -15,7 +15,7 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple, cast -from synapse.api.constants import EduTypes, EventTypes, Membership +from synapse.api.constants import AccountDataTypes, EduTypes, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.events import EventBase from synapse.events.utils import SerializeEventConfig @@ -239,7 +239,7 @@ async def handle_room(event: RoomsForUser) -> None: tags = tags_by_room.get(event.room_id) if tags: account_data_events.append( - {"type": "m.tag", "content": {"tags": tags}} + {"type": AccountDataTypes.TAG, "content": {"tags": tags}} ) account_data = account_data_by_room.get(event.room_id, {}) @@ -326,7 +326,9 @@ async def room_initial_sync( account_data_events = [] tags = await self.store.get_tags_for_room(user_id, room_id) if tags: - account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) + account_data_events.append( + {"type": AccountDataTypes.TAG, "content": {"tags": tags}} + ) account_data = await self.store.get_account_data_for_room(user_id, room_id) for account_data_type, content in account_data.items(): diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 20ee2f203a73..78d488f2b1cb 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -31,7 +31,12 @@ import attr from prometheus_client import Counter -from synapse.api.constants import EventContentFields, EventTypes, Membership +from synapse.api.constants import ( + AccountDataTypes, + EventContentFields, + EventTypes, + Membership, +) from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState from synapse.api.room_versions import KNOWN_ROOM_VERSIONS @@ -2331,7 +2336,9 @@ async def _generate_room_entry( account_data_events = [] if tags is not None: - account_data_events.append({"type": "m.tag", "content": {"tags": tags}}) + account_data_events.append( + {"type": AccountDataTypes.TAG, "content": {"tags": tags}} + ) for account_data_type, content in account_data.items(): account_data_events.append( diff --git a/synapse/replication/tcp/client.py 
b/synapse/replication/tcp/client.py index b5e40da5337e..7263bb2796da 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -33,7 +33,6 @@ PushersStream, PushRulesStream, ReceiptsStream, - TagAccountDataStream, ToDeviceStream, TypingStream, UnPartialStatedEventStream, @@ -168,7 +167,7 @@ async def on_rdata( self.notifier.on_new_event( StreamKeyType.PUSH_RULES, token, users=[row.user_id for row in rows] ) - elif stream_name in (AccountDataStream.NAME, TagAccountDataStream.NAME): + elif stream_name in AccountDataStream.NAME: self.notifier.on_new_event( StreamKeyType.ACCOUNT_DATA, token, users=[row.user_id for row in rows] ) diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 0f166d16aa5f..d03a53d76429 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -58,7 +58,6 @@ PresenceStream, ReceiptsStream, Stream, - TagAccountDataStream, ToDeviceStream, TypingStream, ) @@ -145,7 +144,7 @@ def __init__(self, hs: "HomeServer"): continue - if isinstance(stream, (AccountDataStream, TagAccountDataStream)): + if isinstance(stream, AccountDataStream): # Only add AccountDataStream and TagAccountDataStream as a source on the # instance in charge of account_data persistence. if hs.get_instance_name() in hs.config.worker.writers.account_data: diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py index 110f10aab9a5..a7eadfa3c9f9 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -35,7 +35,6 @@ PushRulesStream, ReceiptsStream, Stream, - TagAccountDataStream, ToDeviceStream, TypingStream, UserSignatureStream, @@ -62,7 +61,6 @@ DeviceListsStream, ToDeviceStream, FederationStream, - TagAccountDataStream, AccountDataStream, UserSignatureStream, UnPartialStatedRoomStream, @@ -83,7 +81,6 @@ "CachesStream", "DeviceListsStream", "ToDeviceStream", - "TagAccountDataStream", "AccountDataStream", "UserSignatureStream", "UnPartialStatedRoomStream", diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index e01155ad597b..fbf78da9c254 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -28,8 +28,8 @@ import attr +from synapse.api.constants import AccountDataTypes from synapse.replication.http.streams import ReplicationGetStreamUpdates -from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer @@ -495,27 +495,6 @@ def __init__(self, hs: "HomeServer"): ) -class TagAccountDataStream(Stream): - """Someone added/removed a tag for a room""" - - @attr.s(slots=True, frozen=True, auto_attribs=True) - class TagAccountDataStreamRow: - user_id: str - room_id: str - data: JsonDict - - NAME = "tag_account_data" - ROW_TYPE = TagAccountDataStreamRow - - def __init__(self, hs: "HomeServer"): - store = hs.get_datastores().main - super().__init__( - hs.get_instance_name(), - current_token_without_instance(store.get_max_account_data_stream_id), - store.get_all_updated_tags, - ) - - class AccountDataStream(Stream): """Global or per room account data was changed""" @@ -560,6 +539,19 @@ async def _update_function( to_token = room_results[-1][0] limited = True + tags, tag_to_token, tags_limited = await self.store.get_all_updated_tags( + instance_name, + from_token, + to_token, + limit, + ) + + # again, if the tag results hit the limit, limit the global results to + # the same stream token. 
+ if tags_limited: + to_token = tag_to_token + limited = True + # convert the global results to the right format, and limit them to the to_token # at the same time global_rows = ( @@ -568,11 +560,16 @@ async def _update_function( if stream_id <= to_token ) - # we know that the room_results are already limited to `to_token` so no need - # for a check on `stream_id` here. room_rows = ( (stream_id, (user_id, room_id, account_data_type)) for stream_id, user_id, room_id, account_data_type in room_results + if stream_id <= to_token + ) + + tag_rows = ( + (stream_id, (user_id, room_id, AccountDataTypes.TAG)) + for stream_id, user_id, room_id in tags + if stream_id <= to_token ) # We need to return a sorted list, so merge them together. @@ -582,7 +579,9 @@ async def _update_function( # leading to a comparison between the data tuples. The comparison could # fail due to attempting to compare the `room_id` which results in a # `TypeError` from comparing a `str` vs `None`. - updates = list(heapq.merge(room_rows, global_rows, key=lambda row: row[0])) + updates = list( + heapq.merge(room_rows, global_rows, tag_rows, key=lambda row: row[0]) + ) return updates, to_token, limited diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 86032897f53a..881d7089dbb2 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -27,7 +27,7 @@ ) from synapse.api.constants import AccountDataTypes -from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream +from synapse.replication.tcp.streams import AccountDataStream from synapse.storage._base import db_to_json from synapse.storage.database import ( DatabasePool, @@ -454,9 +454,7 @@ def process_replication_rows( def process_replication_position( self, stream_name: str, instance_name: str, token: int ) -> None: - if stream_name == TagAccountDataStream.NAME: - self._account_data_id_gen.advance(instance_name, token) - elif stream_name == AccountDataStream.NAME: + if stream_name == AccountDataStream.NAME: self._account_data_id_gen.advance(instance_name, token) super().process_replication_position(stream_name, instance_name, token) diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index e23c927e02f2..d5500cdd470c 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -17,7 +17,8 @@ import logging from typing import Any, Dict, Iterable, List, Tuple, cast -from synapse.replication.tcp.streams import TagAccountDataStream +from synapse.api.constants import AccountDataTypes +from synapse.replication.tcp.streams import AccountDataStream from synapse.storage._base import db_to_json from synapse.storage.database import LoggingTransaction from synapse.storage.databases.main.account_data import AccountDataWorkerStore @@ -54,7 +55,7 @@ async def get_tags_for_user(self, user_id: str) -> Dict[str, Dict[str, JsonDict] async def get_all_updated_tags( self, instance_name: str, last_id: int, current_id: int, limit: int - ) -> Tuple[List[Tuple[int, Tuple[str, str, str]]], int, bool]: + ) -> Tuple[List[Tuple[int, str, str]], int, bool]: """Get updates for tags replication stream. Args: @@ -73,7 +74,7 @@ async def get_all_updated_tags( The token returned can be used in a subsequent call to this function to get further updatees. 
- The updates are a list of 2-tuples of stream ID and the row data + The updates are a list of tuples of stream ID, user ID and room ID """ if last_id == current_id: @@ -96,38 +97,13 @@ def get_all_updated_tags_txn( "get_all_updated_tags", get_all_updated_tags_txn ) - def get_tag_content( - txn: LoggingTransaction, tag_ids: List[Tuple[int, str, str]] - ) -> List[Tuple[int, Tuple[str, str, str]]]: - sql = "SELECT tag, content FROM room_tags WHERE user_id=? AND room_id=?" - results = [] - for stream_id, user_id, room_id in tag_ids: - txn.execute(sql, (user_id, room_id)) - tags = [] - for tag, content in txn: - tags.append(json_encoder.encode(tag) + ":" + content) - tag_json = "{" + ",".join(tags) + "}" - results.append((stream_id, (user_id, room_id, tag_json))) - - return results - - batch_size = 50 - results = [] - for i in range(0, len(tag_ids), batch_size): - tags = await self.db_pool.runInteraction( - "get_all_updated_tag_content", - get_tag_content, - tag_ids[i : i + batch_size], - ) - results.extend(tags) - limited = False upto_token = current_id - if len(results) >= limit: - upto_token = results[-1][0] + if len(tag_ids) >= limit: + upto_token = tag_ids[-1][0] limited = True - return results, upto_token, limited + return tag_ids, upto_token, limited async def get_updated_tags( self, user_id: str, stream_id: int @@ -299,20 +275,16 @@ def process_replication_rows( token: int, rows: Iterable[Any], ) -> None: - if stream_name == TagAccountDataStream.NAME: + if stream_name == AccountDataStream.NAME: for row in rows: - self.get_tags_for_user.invalidate((row.user_id,)) - self._account_data_stream_cache.entity_has_changed(row.user_id, token) + if row.data_type == AccountDataTypes.TAG: + self.get_tags_for_user.invalidate((row.user_id,)) + self._account_data_stream_cache.entity_has_changed( + row.user_id, token + ) super().process_replication_rows(stream_name, instance_name, token, rows) - def process_replication_position( - self, stream_name: str, instance_name: str, token: int - ) -> None: - if stream_name == TagAccountDataStream.NAME: - self._account_data_id_gen.advance(instance_name, token) - super().process_replication_position(stream_name, instance_name, token) - class TagsStore(TagsWorkerStore): pass From 52ae80dd1afd9bb5b4cf2bb79297e1590f92cacb Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 13 Jan 2023 17:58:53 +0000 Subject: [PATCH 020/344] Use stable identifiers for faster joins (#14832) * Use new query param when requesting a partial join * Read new query param when serving partial join * Provide new field names when serving partial joins * Read new field names from partial join response * Changelog --- changelog.d/14832.misc | 1 + synapse/federation/federation_server.py | 2 + synapse/federation/transport/client.py | 18 +++++ .../federation/transport/server/federation.py | 13 +++- tests/federation/test_federation_server.py | 2 +- tests/federation/transport/test_client.py | 77 ++++++++++++++----- 6 files changed, 89 insertions(+), 24 deletions(-) create mode 100644 changelog.d/14832.misc diff --git a/changelog.d/14832.misc b/changelog.d/14832.misc new file mode 100644 index 000000000000..61e7401e43dc --- /dev/null +++ b/changelog.d/14832.misc @@ -0,0 +1 @@ +Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). 
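The diffs below add the stable MSC3706 names alongside the unstable `org.matrix.msc3706.*` ones, with the stable name taking precedence when both appear. A minimal sketch of that precedence rule (hypothetical helper functions, not the actual parser):

```python
from typing import Any, Dict, List, Optional


def read_members_omitted(response: Dict[str, Any]) -> bool:
    # Read the unstable field first, then let the stable field override it.
    value = bool(response.get("org.matrix.msc3706.partial_state", False))
    if "members_omitted" in response:
        value = bool(response["members_omitted"])
    return value


def read_servers_in_room(response: Dict[str, Any]) -> Optional[List[str]]:
    value = response.get("org.matrix.msc3706.servers_in_room")
    if "servers_in_room" in response:
        value = response["servers_in_room"]
    return value
```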
diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index bb20af6e91ed..c65dbf87fba4 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -725,10 +725,12 @@ async def on_send_join_request( "state": [p.get_pdu_json(time_now) for p in state_events], "auth_chain": [p.get_pdu_json(time_now) for p in auth_chain_events], "org.matrix.msc3706.partial_state": caller_supports_partial_state, + "members_omitted": caller_supports_partial_state, } if servers_in_room is not None: resp["org.matrix.msc3706.servers_in_room"] = list(servers_in_room) + resp["servers_in_room"] = list(servers_in_room) return resp diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 77f1f39cacb1..c8471d4cf745 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -357,6 +357,7 @@ async def send_join_v2( if self._faster_joins_enabled: # lazy-load state on join query_params["org.matrix.msc3706.partial_state"] = "true" + query_params["omit_members"] = "true" return await self.client.put_json( destination=destination, @@ -909,6 +910,14 @@ def __init__(self, room_version: RoomVersion, v1_api: bool): use_float="True", ) ) + # The stable field name comes last, so it "wins" if the fields disagree + self._coros.append( + ijson.items_coro( + _partial_state_parser(self._response), + "members_omitted", + use_float="True", + ) + ) self._coros.append( ijson.items_coro( @@ -918,6 +927,15 @@ def __init__(self, room_version: RoomVersion, v1_api: bool): ) ) + # Again, stable field name comes last + self._coros.append( + ijson.items_coro( + _servers_in_room_parser(self._response), + "servers_in_room", + use_float="True", + ) + ) + def write(self, data: bytes) -> int: for c in self._coros: c.send(data) diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 53e77b4bb62b..c0a700905b8b 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -437,9 +437,16 @@ async def on_PUT( partial_state = False if self._msc3706_enabled: - partial_state = parse_boolean_from_args( - query, "org.matrix.msc3706.partial_state", default=False - ) + # The stable query parameter wins, if it disagrees with the unstable + # parameter for some reason. 
+ stable_param = parse_boolean_from_args(query, "omit_members", default=None) + if stable_param is not None: + partial_state = stable_param + else: + partial_state = parse_boolean_from_args( + query, "org.matrix.msc3706.partial_state", default=False + ) + result = await self.handler.on_send_join_request( origin, content, room_id, caller_supports_partial_state=partial_state ) diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 177e5b5afce3..27770304be43 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -224,7 +224,7 @@ def test_send_join_partial_state(self) -> None: ) channel = self.make_signed_federation_request( "PUT", - f"/_matrix/federation/v2/send_join/{self._room_id}/x?org.matrix.msc3706.partial_state=true", + f"/_matrix/federation/v2/send_join/{self._room_id}/x?omit_members=true", content=join_event_dict, ) self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index b84c74fc0eca..c90635e0a0d7 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -13,12 +13,14 @@ # limitations under the License. import json +from typing import List, Optional from unittest.mock import Mock import ijson.common from synapse.api.room_versions import RoomVersions from synapse.federation.transport.client import SendJoinParser +from synapse.types import JsonDict from synapse.util import ExceptionBundle from tests.unittest import TestCase @@ -71,33 +73,68 @@ def test_two_writes(self) -> None: def test_partial_state(self) -> None: """Check that the partial_state flag is correctly parsed""" - parser = SendJoinParser(RoomVersions.V1, False) - response = { - "org.matrix.msc3706.partial_state": True, - } - serialised_response = json.dumps(response).encode() + def parse(response: JsonDict) -> bool: + parser = SendJoinParser(RoomVersions.V1, False) + serialised_response = json.dumps(response).encode() - # Send data to the parser - parser.write(serialised_response) + # Send data to the parser + parser.write(serialised_response) - # Retrieve and check the parsed SendJoinResponse - parsed_response = parser.finish() - self.assertTrue(parsed_response.partial_state) + # Retrieve and check the parsed SendJoinResponse + parsed_response = parser.finish() + return parsed_response.partial_state - def test_servers_in_room(self) -> None: - """Check that the servers_in_room field is correctly parsed""" - parser = SendJoinParser(RoomVersions.V1, False) - response = {"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]} + self.assertTrue(parse({"members_omitted": True})) + self.assertTrue(parse({"org.matrix.msc3706.partial_state": True})) - serialised_response = json.dumps(response).encode() + self.assertFalse(parse({"members_omitted": False})) + self.assertFalse(parse({"org.matrix.msc3706.partial_state": False})) - # Send data to the parser - parser.write(serialised_response) + # If there's a conflict, the stable field wins. 
+ self.assertTrue( + parse({"members_omitted": True, "org.matrix.msc3706.partial_state": False}) + ) + self.assertFalse( + parse({"members_omitted": False, "org.matrix.msc3706.partial_state": True}) + ) - # Retrieve and check the parsed SendJoinResponse - parsed_response = parser.finish() - self.assertEqual(parsed_response.servers_in_room, ["hs1", "hs2"]) + def test_servers_in_room(self) -> None: + """Check that the servers_in_room field is correctly parsed""" + + def parse(response: JsonDict) -> Optional[List[str]]: + parser = SendJoinParser(RoomVersions.V1, False) + serialised_response = json.dumps(response).encode() + + # Send data to the parser + parser.write(serialised_response) + + # Retrieve and check the parsed SendJoinResponse + parsed_response = parser.finish() + return parsed_response.servers_in_room + + self.assertEqual( + parse({"org.matrix.msc3706.servers_in_room": ["hs1", "hs2"]}), + ["hs1", "hs2"], + ) + self.assertEqual(parse({"servers_in_room": ["example.com"]}), ["example.com"]) + + # If both are provided, the stable identifier should win + self.assertEqual( + parse( + { + "org.matrix.msc3706.servers_in_room": ["old"], + "servers_in_room": ["new"], + } + ), + ["new"], + ) + + # And lastly, we should be able to tell if neither field was present. + self.assertEqual( + parse({}), + None, + ) def test_errors_closing_coroutines(self) -> None: """Check we close all coroutines, even if closing the first raises an Exception. From 54cd90ea60610a6dc24a291dd0cad4ce9bea8728 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 13 Jan 2023 19:32:10 +0000 Subject: [PATCH 021/344] Implement MSC3890: Remotely silence local notifications (#14775) --- changelog.d/14775.feature | 1 + .../complement/conf/workers-shared-extra.yaml.j2 | 2 ++ scripts-dev/complement.sh | 2 +- synapse/config/experimental.py | 15 +++++++++++++++ synapse/handlers/device.py | 11 ++++++++++- 5 files changed, 29 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14775.feature diff --git a/changelog.d/14775.feature b/changelog.d/14775.feature new file mode 100644 index 000000000000..7b7ee42cacba --- /dev/null +++ b/changelog.d/14775.feature @@ -0,0 +1 @@ +Implement support for MSC3890: Remotely silence local notifications. \ No newline at end of file diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index cb839fed078d..1170694df501 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -102,6 +102,8 @@ experimental_features: {% endif %} # Filtering /messages by relation type. msc3874_enabled: true + # Enable deleting device-specific notification settings stored in account data + msc3890_enabled: true # Enable removing account data support msc3391_enabled: true diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 51d1bac6183c..7c48d8bccb0f 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -190,7 +190,7 @@ fi extra_test_args=() -test_tags="synapse_blacklist,msc3787,msc3874,msc3391" +test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391" # All environment variables starting with PASS_ will be shared. # (The prefix is stripped off before reaching the container.) 
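For context on the device handler change below: MSC3890 stores per-device notification settings in account data whose type embeds the device ID, which is why deleting a device now also removes that account data. A minimal sketch (the `is_silenced` field comes from the MSC, not from this patch):

```python
def local_notification_settings_type(device_id: str) -> str:
    # Unstable account data type used while MSC3890 is experimental.
    return f"org.matrix.msc3890.local_notification_settings.{device_id}"


# A client silencing notifications for one of its devices stores account data
# of that type with content along these lines:
content = {"is_silenced": True}
print(local_notification_settings_type("ABCDEFGH"), content)
```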
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index a8b2db372d56..72a17e0616d5 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -17,6 +17,7 @@ import attr from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions +from synapse.config import ConfigError from synapse.config._base import Config from synapse.types import JsonDict @@ -93,6 +94,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC2815 (allow room moderators to view redacted event content) self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False) + # MSC3391: Removing account data. + self.msc3391_enabled = experimental.get("msc3391_enabled", False) + # MSC3773: Thread notifications self.msc3773_enabled: bool = experimental.get("msc3773_enabled", False) @@ -127,6 +131,17 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "msc3886_endpoint", None ) + # MSC3890: Remotely silence local notifications + # Note: This option requires "experimental_features.msc3391_enabled" to be + # set to "true", in order to communicate account data deletions to clients. + self.msc3890_enabled: bool = experimental.get("msc3890_enabled", False) + if self.msc3890_enabled and not self.msc3391_enabled: + raise ConfigError( + "Option 'experimental_features.msc3391' must be set to 'true' to " + "enable 'experimental_features.msc3890'. MSC3391 functionality is " + "required to communicate account data deletions to clients." + ) + # MSC3912: Relation-based redactions. self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 89864e111941..0640ea79a03d 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -346,6 +346,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.federation_sender = hs.get_federation_sender() + self._account_data_handler = hs.get_account_data_handler() self._storage_controllers = hs.get_storage_controllers() self.device_list_updater = DeviceListUpdater(hs, self) @@ -502,7 +503,7 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: else: raise - # Delete access tokens and e2e keys for each device. Not optimised as it is not + # Delete data specific to each device. Not optimised as it is not # considered as part of a critical path. for device_id in device_ids: await self._auth_handler.delete_access_tokens_for_user( @@ -512,6 +513,14 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: user_id=user_id, device_id=device_id ) + if self.hs.config.experimental.msc3890_enabled: + # Remove any local notification settings for this device in accordance + # with MSC3890. + await self._account_data_handler.remove_account_data_for_user( + user_id, + f"org.matrix.msc3890.local_notification_settings.{device_id}", + ) + await self.notify_device_update(user_id, device_ids) async def update_device(self, user_id: str, device_id: str, content: dict) -> None: From 5f171c165142aa23f54013faaf42d860c81fb902 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 16 Jan 2023 10:51:55 +0000 Subject: [PATCH 022/344] Bump regex from 1.7.0 to 1.7.1 (#14848) * Bump regex from 1.7.0 to 1.7.1 Bumps [regex](https://github.com/rust-lang/regex) from 1.7.0 to 1.7.1. 
- [Release notes](https://github.com/rust-lang/regex/releases) - [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md) - [Commits](https://github.com/rust-lang/regex/compare/1.7.0...1.7.1) --- updated-dependencies: - dependency-name: regex dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 4 ++-- changelog.d/14848.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14848.misc diff --git a/Cargo.lock b/Cargo.lock index ace6a8c50aa0..079a3f854e75 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -294,9 +294,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.7.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" +checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733" dependencies = [ "aho-corasick", "memchr", diff --git a/changelog.d/14848.misc b/changelog.d/14848.misc new file mode 100644 index 000000000000..32aa6c9bc815 --- /dev/null +++ b/changelog.d/14848.misc @@ -0,0 +1 @@ +Bump regex from 1.7.0 to 1.7.1. From 85a7a201fa460c227562111fba4d3d6aef681e23 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 16 Jan 2023 12:40:25 +0000 Subject: [PATCH 023/344] Also use stable name in SendJoinResponse struct (#14841) * Also use stable name in SendJoinResponse struct follow-up to #14832 * Changelog * Fix a rename I missed * Run black * Update synapse/federation/federation_client.py Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/14841.misc | 1 + synapse/federation/federation_client.py | 6 +++--- synapse/federation/federation_server.py | 2 +- synapse/federation/transport/client.py | 16 +++++++++------- tests/federation/transport/test_client.py | 6 +++--- 5 files changed, 17 insertions(+), 14 deletions(-) create mode 100644 changelog.d/14841.misc diff --git a/changelog.d/14841.misc b/changelog.d/14841.misc new file mode 100644 index 000000000000..61e7401e43dc --- /dev/null +++ b/changelog.d/14841.misc @@ -0,0 +1 @@ +Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). 
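Roughly, the rename in the response struct and the consistency check on partial-state joins amount to the following sketch (illustrative class and function names, not the real ones):

```python
from typing import List, Optional

import attr


@attr.s(slots=True, frozen=True, auto_attribs=True)
class ExampleSendJoinResponse:
    # Formerly `partial_state`; renamed to match the stable MSC3706 wording.
    members_omitted: bool = False
    servers_in_room: Optional[List[str]] = None


def check_response(response: ExampleSendJoinResponse) -> None:
    # If membership events were omitted, the resident server must say which
    # servers are in the room, or the joining server cannot complete the join.
    if response.members_omitted and not response.servers_in_room:
        raise ValueError("members_omitted was set, but no servers were listed in the room")
```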
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 137cfb3346d2..b7002e8a6cb3 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1142,9 +1142,9 @@ async def _execute(pdu: EventBase) -> None: % (auth_chain_create_events,) ) - if response.partial_state and not response.servers_in_room: + if response.members_omitted and not response.servers_in_room: raise InvalidResponseError( - "partial_state was set, but no servers were listed in the room" + "members_omitted was set, but no servers were listed in the room" ) return SendJoinResult( @@ -1152,7 +1152,7 @@ async def _execute(pdu: EventBase) -> None: state=signed_state, auth_chain=signed_auth, origin=destination, - partial_state=response.partial_state, + partial_state=response.members_omitted, servers_in_room=response.servers_in_room or [], ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index c65dbf87fba4..3197939a363d 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -1502,7 +1502,7 @@ def _get_event_ids_for_partial_state_join( prev_state_ids: StateMap[str], summary: Dict[str, MemberSummary], ) -> Collection[str]: - """Calculate state to be retuned in a partial_state send_join + """Calculate state to be returned in a partial_state send_join Args: join_event: the join event being send_joined diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index c8471d4cf745..5ec651400ac5 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -795,7 +795,7 @@ class SendJoinResponse: event: Optional[EventBase] = None # The room state is incomplete - partial_state: bool = False + members_omitted: bool = False # List of servers in the room servers_in_room: Optional[List[str]] = None @@ -835,16 +835,18 @@ def _event_list_parser( @ijson.coroutine -def _partial_state_parser(response: SendJoinResponse) -> Generator[None, Any, None]: +def _members_omitted_parser(response: SendJoinResponse) -> Generator[None, Any, None]: """Helper function for use with `ijson.items_coro` - Parses the partial_state field in send_join responses + Parses the members_omitted field in send_join responses """ while True: val = yield if not isinstance(val, bool): - raise TypeError("partial_state must be a boolean") - response.partial_state = val + raise TypeError( + "members_omitted (formerly org.matrix.msc370c.partial_state) must be a boolean" + ) + response.members_omitted = val @ijson.coroutine @@ -905,7 +907,7 @@ def __init__(self, room_version: RoomVersion, v1_api: bool): if not v1_api: self._coros.append( ijson.items_coro( - _partial_state_parser(self._response), + _members_omitted_parser(self._response), "org.matrix.msc3706.partial_state", use_float="True", ) @@ -913,7 +915,7 @@ def __init__(self, room_version: RoomVersion, v1_api: bool): # The stable field name comes last, so it "wins" if the fields disagree self._coros.append( ijson.items_coro( - _partial_state_parser(self._response), + _members_omitted_parser(self._response), "members_omitted", use_float="True", ) diff --git a/tests/federation/transport/test_client.py b/tests/federation/transport/test_client.py index c90635e0a0d7..3d61b1e8a9ca 100644 --- a/tests/federation/transport/test_client.py +++ b/tests/federation/transport/test_client.py @@ -68,11 +68,11 @@ def test_two_writes(self) -> None: self.assertEqual(len(parsed_response.state), 1, 
parsed_response) self.assertEqual(parsed_response.event_dict, {}, parsed_response) self.assertIsNone(parsed_response.event, parsed_response) - self.assertFalse(parsed_response.partial_state, parsed_response) + self.assertFalse(parsed_response.members_omitted, parsed_response) self.assertEqual(parsed_response.servers_in_room, None, parsed_response) def test_partial_state(self) -> None: - """Check that the partial_state flag is correctly parsed""" + """Check that the members_omitted flag is correctly parsed""" def parse(response: JsonDict) -> bool: parser = SendJoinParser(RoomVersions.V1, False) @@ -83,7 +83,7 @@ def parse(response: JsonDict) -> bool: # Retrieve and check the parsed SendJoinResponse parsed_response = parser.finish() - return parsed_response.partial_state + return parsed_response.members_omitted self.assertTrue(parse({"members_omitted": True})) self.assertTrue(parse({"org.matrix.msc3706.partial_state": True})) From 7801fd74dacbf69ad6e3299ed2e2a5dcda8997e8 Mon Sep 17 00:00:00 2001 From: Rhea Danzey Date: Mon, 16 Jan 2023 06:59:15 -0600 Subject: [PATCH 024/344] Fix missing field in AS documentation (#14845) * Fix missing field in AS documentation The [AS Configuration Snippet](https://matrix-org.github.io/synapse/latest/application_services.html) is missing `id` field, without it Synapse will fail to load: ``` synapse-synapse-main-0 synapse 2023-01-13 23:05:25,450 - synapse.storage.databases - 84 - INFO - main - [database config 'master']: Starting 'main' database synapse-synapse-main-0 synapse 2023-01-13 23:05:25,452 - synapse.config.appservice - 79 - ERROR - main - Failed to load appservice from '/as/synapse-hookshot-as/registration.yaml' synapse-synapse-main-0 synapse 2023-01-13 23:05:25,452 - synapse.config.appservice - 80 - ERROR - main - "Required string field: 'id' (/as/synapse-hookshot-as/registration.yaml)" synapse-synapse-main-0 synapse Traceback (most recent call last): synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/config/appservice.py", line 57, in load_appservices synapse-synapse-main-0 synapse appservice = _load_appservice(hostname, yaml.safe_load(f), config_file) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/config/appservice.py", line 91, in _load_appservice synapse-synapse-main-0 synapse raise KeyError( synapse-synapse-main-0 synapse KeyError: "Required string field: 'id' (/as/synapse-hookshot-as/registration.yaml)" synapse-synapse-main-0 synapse 2023-01-13 23:05:25,452 - synapse.app._base - 207 - ERROR - main - Exception during startup synapse-synapse-main-0 synapse Traceback (most recent call last): synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/app/homeserver.py", line 340, in setup synapse-synapse-main-0 synapse hs.setup() synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/server.py", line 310, in setup synapse-synapse-main-0 synapse self.datastores = Databases(self.DATASTORE_CLASS, self) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/__init__.py", line 93, in __init__ synapse-synapse-main-0 synapse main = main_store_class(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/__init__.py", line 139, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File 
"/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/events_bg_updates.py", line 98, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/devices.py", line 1584, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/devices.py", line 89, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/roommember.py", line 1494, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/room.py", line 1827, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/room.py", line 1365, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/room.py", line 119, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/registration.py", line 2158, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/presence.py", line 67, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/presence.py", line 48, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/transactions.py", line 73, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/state.py", line 666, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/state.py", line 82, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/state.py", line 470, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/event_federation.py", line 2007, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/media_repository.py", line 148, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/media_repository.py", line 68, in __init__ 
synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/push_rule.py", line 330, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/event_push_actions.py", line 1938, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/metrics.py", line 68, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/event_push_actions.py", line 249, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/end_to_end_keys.py", line 1181, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/search.py", line 426, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/search.py", line 137, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/account_data.py", line 64, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/push_rule.py", line 114, in __init__ synapse-synapse-main-0 synapse super().__init__(database, db_conn, hs) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/storage/databases/main/appservice.py", line 76, in __init__ synapse-synapse-main-0 synapse self.services_cache = load_appservices( synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/config/appservice.py", line 57, in load_appservices synapse-synapse-main-0 synapse appservice = _load_appservice(hostname, yaml.safe_load(f), config_file) synapse-synapse-main-0 synapse File "/usr/local/lib/python3.9/site-packages/synapse/config/appservice.py", line 91, in _load_appservice synapse-synapse-main-0 synapse raise KeyError( synapse-synapse-main-0 synapse KeyError: "Required string field: 'id' (/as/synapse-hookshot-as/registration.yaml)" synapse-synapse-main-0 synapse ****************************************************************************** synapse-synapse-main-0 synapse Error during initialisation: synapse-synapse-main-0 synapse "Required string field: 'id' (/as/synapse-hookshot-as/registration.yaml)" synapse-synapse-main-0 synapse There may be more information in the logs. 
synapse-synapse-main-0 synapse **************************************************************************** ``` * Changelog --- changelog.d/14845.doc | 1 + docs/application_services.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/14845.doc diff --git a/changelog.d/14845.doc b/changelog.d/14845.doc new file mode 100644 index 000000000000..dd969aa05cea --- /dev/null +++ b/changelog.d/14845.doc @@ -0,0 +1 @@ +Fix the example config missing the `id` field in [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). diff --git a/docs/application_services.md b/docs/application_services.md index e4592010a2b5..1f988185a95e 100644 --- a/docs/application_services.md +++ b/docs/application_services.md @@ -15,6 +15,7 @@ app_service_config_files: The format of the AS configuration file is as follows: ```yaml +id: url: as_token: hs_token: From a302d3ecf75493f84fc5be616fee7d199ed12394 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 16 Jan 2023 13:16:19 +0000 Subject: [PATCH 025/344] Remove unnecessary reactor reference from `_PerHostRatelimiter` (#14842) Fix up #14812 to avoid introducing a reference to the reactor. Signed-off-by: Sean Quah --- changelog.d/14842.bugfix | 1 + synapse/rest/client/register.py | 1 - synapse/server.py | 1 - synapse/util/ratelimitutils.py | 10 ++-------- tests/util/test_ratelimitutils.py | 8 ++++---- 5 files changed, 7 insertions(+), 14 deletions(-) create mode 100644 changelog.d/14842.bugfix diff --git a/changelog.d/14842.bugfix b/changelog.d/14842.bugfix new file mode 100644 index 000000000000..94e0d70cbcc4 --- /dev/null +++ b/changelog.d/14842.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconnected early.
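The changelog entry above refers to the stack-exhaustion fix from #14812, which schedules the next queued request with a zero-delay call (`start_next_request` in the diff below) rather than invoking it synchronously; this patch simply routes that call through the `Clock` instead of keeping a separate reactor reference. A toy illustration of why zero-delay scheduling keeps the Python stack flat, using Twisted's test `Clock` (this is not Synapse code):

```python
from twisted.internet.task import Clock

clock = Clock()  # a fake clock/reactor, standing in for Synapse's Clock
queue = list(range(10_000))


def start_next_request() -> None:
    if not queue:
        return
    queue.pop(0)
    # Schedule the next item with a zero delay instead of calling it directly:
    # each item then runs from the scheduler's loop, so the Python call stack
    # stays flat no matter how many requests are queued.
    clock.callLater(0.0, start_next_request)


start_next_request()
clock.advance(0.0)  # the fake clock runs every due zero-delay call, iteratively
assert not queue
```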
diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index be696c304bb9..3cb1e7e37501 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -310,7 +310,6 @@ def __init__(self, hs: "HomeServer"): self.hs = hs self.registration_handler = hs.get_registration_handler() self.ratelimiter = FederationRateLimiter( - hs.get_reactor(), hs.get_clock(), FederationRatelimitSettings( # Time window of 2s diff --git a/synapse/server.py b/synapse/server.py index c8752baa5ae0..f4ab94c4f33c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -768,7 +768,6 @@ def get_replication_streams(self) -> Dict[str, Stream]: @cache_in_self def get_federation_ratelimiter(self) -> FederationRateLimiter: return FederationRateLimiter( - self.get_reactor(), self.get_clock(), config=self.config.ratelimiting.rc_federation, metrics_name="federation_servlets", diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index bd72947bfed7..f262bf95a0fd 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -34,7 +34,6 @@ from typing_extensions import ContextManager from twisted.internet import defer -from twisted.internet.interfaces import IReactorTime from synapse.api.errors import LimitExceededError from synapse.config.ratelimiting import FederationRatelimitSettings @@ -147,14 +146,12 @@ class FederationRateLimiter: def __init__( self, - reactor: IReactorTime, clock: Clock, config: FederationRatelimitSettings, metrics_name: Optional[str] = None, ): """ Args: - reactor clock config metrics_name: The name of the rate limiter so we can differentiate it @@ -166,7 +163,7 @@ def __init__( def new_limiter() -> "_PerHostRatelimiter": return _PerHostRatelimiter( - reactor=reactor, clock=clock, config=config, metrics_name=metrics_name + clock=clock, config=config, metrics_name=metrics_name ) self.ratelimiters: DefaultDict[ @@ -197,14 +194,12 @@ def ratelimit(self, host: str) -> "_GeneratorContextManager[defer.Deferred[None] class _PerHostRatelimiter: def __init__( self, - reactor: IReactorTime, clock: Clock, config: FederationRatelimitSettings, metrics_name: Optional[str] = None, ): """ Args: - reactor clock config metrics_name: The name of the rate limiter so we can differentiate it @@ -212,7 +207,6 @@ def __init__( for this rate limiter. 
from the rest in the metrics """ - self.reactor = reactor self.clock = clock self.metrics_name = metrics_name @@ -388,4 +382,4 @@ def start_next_request() -> None: except KeyError: pass - self.reactor.callLater(0.0, start_next_request) + self.clock.call_later(0.0, start_next_request) diff --git a/tests/util/test_ratelimitutils.py b/tests/util/test_ratelimitutils.py index 2f3ea15b965f..fe4961dcf3ab 100644 --- a/tests/util/test_ratelimitutils.py +++ b/tests/util/test_ratelimitutils.py @@ -30,7 +30,7 @@ def test_ratelimit(self) -> None: """A simple test with the default values""" reactor, clock = get_clock() rc_config = build_rc_config() - ratelimiter = FederationRateLimiter(reactor, clock, rc_config) + ratelimiter = FederationRateLimiter(clock, rc_config) with ratelimiter.ratelimit("testhost") as d1: # shouldn't block @@ -40,7 +40,7 @@ def test_concurrent_limit(self) -> None: """Test what happens when we hit the concurrent limit""" reactor, clock = get_clock() rc_config = build_rc_config({"rc_federation": {"concurrent": 2}}) - ratelimiter = FederationRateLimiter(reactor, clock, rc_config) + ratelimiter = FederationRateLimiter(clock, rc_config) with ratelimiter.ratelimit("testhost") as d1: # shouldn't block @@ -67,7 +67,7 @@ def test_sleep_limit(self) -> None: rc_config = build_rc_config( {"rc_federation": {"sleep_limit": 2, "sleep_delay": 500}} ) - ratelimiter = FederationRateLimiter(reactor, clock, rc_config) + ratelimiter = FederationRateLimiter(clock, rc_config) with ratelimiter.ratelimit("testhost") as d1: # shouldn't block @@ -98,7 +98,7 @@ def test_lots_of_queued_things(self) -> None: } } ) - ratelimiter = FederationRateLimiter(reactor, clock, rc_config) + ratelimiter = FederationRateLimiter(clock, rc_config) with ratelimiter.ratelimit("testhost") as d: # shouldn't block From 4db3331bb95a655bb56ab8333be49ee183f71715 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 16 Jan 2023 14:20:12 +0000 Subject: [PATCH 026/344] Add an early return when handling no-op presence updates. (#14855) This stops us from incrementing the presence stream position for no-op updates. --- changelog.d/14855.misc | 1 + synapse/handlers/presence.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/14855.misc diff --git a/changelog.d/14855.misc b/changelog.d/14855.misc new file mode 100644 index 000000000000..f0e292f2878f --- /dev/null +++ b/changelog.d/14855.misc @@ -0,0 +1 @@ +Add an early return when handling no-op presence updates. diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 2af90b25a39c..43e4e7b1b4c2 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -2155,6 +2155,11 @@ def send_presence_to_destinations( # This should only be called on a presence writer. assert self._presence_writer + if not states or not destinations: + # Ignore calls which either don't have any new states or don't need + # to be sent anywhere. + return + if self._federation: self._federation.send_presence_to_destinations( states=states, From db5145a31d8ed76ac637f933f4facc195d557f75 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 16 Jan 2023 23:15:17 +0000 Subject: [PATCH 027/344] Add parameter to control whether we do a partial state join (#14843) When the local homeserver is already joined to a room and wants to perform another remote join, we may find it useful to do a non-partial state join if we already have the full state for the room. 
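The transport-layer change in this patch threads the new flag through to both the stable and unstable query parameters of the `/send_join` request. A rough sketch of that mapping (the standalone `build_send_join_query_params` function below is a simplification for illustration, not the real `TransportLayerClient` method):

```python
from typing import Dict


def build_send_join_query_params(
    faster_joins_enabled: bool, omit_members: bool
) -> Dict[str, str]:
    """Sketch of how the /send_join query string might be built from the flag."""
    query_params: Dict[str, str] = {}
    if faster_joins_enabled:
        # Send both spellings, so servers that only understand the unstable
        # MSC3706 name and servers that use the stable name see the same value.
        value = "true" if omit_members else "false"
        query_params["org.matrix.msc3706.partial_state"] = value
        query_params["omit_members"] = value
    return query_params


assert build_send_join_query_params(True, False) == {
    "org.matrix.msc3706.partial_state": "false",
    "omit_members": "false",
}
assert build_send_join_query_params(False, True) == {}
```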
Signed-off-by: Sean Quah --- changelog.d/14843.misc | 1 + synapse/federation/federation_client.py | 21 ++++++++++++++++++--- synapse/federation/transport/client.py | 7 +++++-- 3 files changed, 24 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14843.misc diff --git a/changelog.d/14843.misc b/changelog.d/14843.misc new file mode 100644 index 000000000000..bec3c216bc93 --- /dev/null +++ b/changelog.d/14843.misc @@ -0,0 +1 @@ +Add a parameter to control whether the federation client performs a partial state join. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index b7002e8a6cb3..15a9a8830271 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1014,7 +1014,11 @@ async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: ) async def send_join( - self, destinations: Iterable[str], pdu: EventBase, room_version: RoomVersion + self, + destinations: Iterable[str], + pdu: EventBase, + room_version: RoomVersion, + partial_state: bool = True, ) -> SendJoinResult: """Sends a join event to one of a list of homeservers. @@ -1027,6 +1031,10 @@ async def send_join( pdu: event to be sent room_version: the version of the room (according to the server that did the make_join) + partial_state: whether to ask the remote server to omit membership state + events from the response. If the remote server complies, + `partial_state` in the send join result will be set. Defaults to + `True`. Returns: The result of the send join request. @@ -1037,7 +1045,9 @@ async def send_join( """ async def send_request(destination: str) -> SendJoinResult: - response = await self._do_send_join(room_version, destination, pdu) + response = await self._do_send_join( + room_version, destination, pdu, omit_members=partial_state + ) # If an event was returned (and expected to be returned): # @@ -1177,7 +1187,11 @@ async def _execute(pdu: EventBase) -> None: ) async def _do_send_join( - self, room_version: RoomVersion, destination: str, pdu: EventBase + self, + room_version: RoomVersion, + destination: str, + pdu: EventBase, + omit_members: bool, ) -> SendJoinResponse: time_now = self._clock.time_msec() @@ -1188,6 +1202,7 @@ async def _do_send_join( room_id=pdu.room_id, event_id=pdu.event_id, content=pdu.get_pdu_json(time_now), + omit_members=omit_members, ) except HttpResponseException as e: # If an error is received that is due to an unrecognised endpoint, diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 5ec651400ac5..556883f0798b 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -351,13 +351,16 @@ async def send_join_v2( room_id: str, event_id: str, content: JsonDict, + omit_members: bool, ) -> "SendJoinResponse": path = _create_v2_path("/send_join/%s/%s", room_id, event_id) query_params: Dict[str, str] = {} if self._faster_joins_enabled: # lazy-load state on join - query_params["org.matrix.msc3706.partial_state"] = "true" - query_params["omit_members"] = "true" + query_params["org.matrix.msc3706.partial_state"] = ( + "true" if omit_members else "false" + ) + query_params["omit_members"] = "true" if omit_members else "false" return await self.client.put_json( destination=destination, From 2b084c5b710d9630178484e6ade597ca7fa814b6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 17 Jan 2023 09:29:58 +0000 Subject: [PATCH 028/344] Merge device list replication streams (#14833) --- changelog.d/14826.misc | 
2 +- changelog.d/14833.misc | 1 + docs/upgrade.md | 9 +-- synapse/replication/tcp/client.py | 8 ++- synapse/replication/tcp/streams/__init__.py | 3 - synapse/replication/tcp/streams/_base.py | 74 +++++++++++++++------ synapse/storage/databases/main/devices.py | 13 ++-- 7 files changed, 72 insertions(+), 38 deletions(-) create mode 100644 changelog.d/14833.misc diff --git a/changelog.d/14826.misc b/changelog.d/14826.misc index 9ebedcf51e7c..e80673a72167 100644 --- a/changelog.d/14826.misc +++ b/changelog.d/14826.misc @@ -1 +1 @@ -Merge tag and normal account data replication streams. +Merge the two account data and the two device list replication streams. diff --git a/changelog.d/14833.misc b/changelog.d/14833.misc new file mode 100644 index 000000000000..e80673a72167 --- /dev/null +++ b/changelog.d/14833.misc @@ -0,0 +1 @@ +Merge the two account data and the two device list replication streams. diff --git a/docs/upgrade.md b/docs/upgrade.md index 8a76172e43cc..270c33b6562e 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -92,12 +92,13 @@ process, for example: ## Changes to the account data replication streams -Synapse has changed the format of the account data replication streams (between -workers). This is a forwards- and backwards-incompatible change: v1.75 workers -cannot process account data replicated by v1.76 workers, and vice versa. +Synapse has changed the format of the account data and devices replication +streams (between workers). This is a forwards- and backwards-incompatible +change: v1.75 workers cannot process account data replicated by v1.76 workers, +and vice versa. Once all workers are upgraded to v1.76 (or downgraded to v1.75), account data -replication will resume as normal. +and device replication will resume as normal. # Upgrading to v1.74.0 diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 7263bb2796da..31022ce5fb41 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -187,7 +187,7 @@ async def on_rdata( elif stream_name == DeviceListsStream.NAME: all_room_ids: Set[str] = set() for row in rows: - if row.entity.startswith("@"): + if row.entity.startswith("@") and not row.is_signature: room_ids = await self.store.get_rooms_for_user(row.entity) all_room_ids.update(room_ids) self.notifier.on_new_event( @@ -422,7 +422,11 @@ async def process_replication_rows( # The entities are either user IDs (starting with '@') whose devices # have changed, or remote servers that we need to tell about # changes. 
- hosts = {row.entity for row in rows if not row.entity.startswith("@")} + hosts = { + row.entity + for row in rows + if not row.entity.startswith("@") and not row.is_signature + } for host in hosts: self.federation_sender.send_device_messages(host, immediate=False) diff --git a/synapse/replication/tcp/streams/__init__.py b/synapse/replication/tcp/streams/__init__.py index a7eadfa3c9f9..9c67f661a362 100644 --- a/synapse/replication/tcp/streams/__init__.py +++ b/synapse/replication/tcp/streams/__init__.py @@ -37,7 +37,6 @@ Stream, ToDeviceStream, TypingStream, - UserSignatureStream, ) from synapse.replication.tcp.streams.events import EventsStream from synapse.replication.tcp.streams.federation import FederationStream @@ -62,7 +61,6 @@ ToDeviceStream, FederationStream, AccountDataStream, - UserSignatureStream, UnPartialStatedRoomStream, UnPartialStatedEventStream, ) @@ -82,7 +80,6 @@ "DeviceListsStream", "ToDeviceStream", "AccountDataStream", - "UserSignatureStream", "UnPartialStatedRoomStream", "UnPartialStatedEventStream", ] diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index fbf78da9c254..a4bdb48c0c99 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -463,18 +463,67 @@ class DeviceListsStream(Stream): @attr.s(slots=True, frozen=True, auto_attribs=True) class DeviceListsStreamRow: entity: str + # Indicates that a user has signed their own device with their user-signing key + is_signature: bool NAME = "device_lists" ROW_TYPE = DeviceListsStreamRow def __init__(self, hs: "HomeServer"): - store = hs.get_datastores().main + self.store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - current_token_without_instance(store.get_device_stream_token), - store.get_all_device_list_changes_for_remotes, + current_token_without_instance(self.store.get_device_stream_token), + self._update_function, + ) + + async def _update_function( + self, + instance_name: str, + from_token: Token, + current_token: Token, + target_row_count: int, + ) -> StreamUpdateResult: + ( + device_updates, + devices_to_token, + devices_limited, + ) = await self.store.get_all_device_list_changes_for_remotes( + instance_name, from_token, current_token, target_row_count ) + ( + signatures_updates, + signatures_to_token, + signatures_limited, + ) = await self.store.get_all_user_signature_changes_for_remotes( + instance_name, from_token, current_token, target_row_count + ) + + upper_limit_token = current_token + if devices_limited: + upper_limit_token = min(upper_limit_token, devices_to_token) + if signatures_limited: + upper_limit_token = min(upper_limit_token, signatures_to_token) + + device_updates = [ + (stream_id, (entity, False)) + for stream_id, (entity,) in device_updates + if stream_id <= upper_limit_token + ] + + signatures_updates = [ + (stream_id, (entity, True)) + for stream_id, (entity,) in signatures_updates + if stream_id <= upper_limit_token + ] + + updates = list( + heapq.merge(device_updates, signatures_updates, key=lambda row: row[0]) + ) + + return updates, upper_limit_token, devices_limited or signatures_limited + class ToDeviceStream(Stream): """New to_device messages for a client""" @@ -583,22 +632,3 @@ async def _update_function( heapq.merge(room_rows, global_rows, tag_rows, key=lambda row: row[0]) ) return updates, to_token, limited - - -class UserSignatureStream(Stream): - """A user has signed their own device with their user-signing key""" - - @attr.s(slots=True, frozen=True, 
auto_attribs=True) - class UserSignatureStreamRow: - user_id: str - - NAME = "user_signature" - ROW_TYPE = UserSignatureStreamRow - - def __init__(self, hs: "HomeServer"): - store = hs.get_datastores().main - super().__init__( - hs.get_instance_name(), - current_token_without_instance(store.get_device_stream_token), - store.get_all_user_signature_changes_for_remotes, - ) diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index b06766447338..cd186c84726c 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -38,7 +38,7 @@ whitelisted_homeserver, ) from synapse.metrics.background_process_metrics import wrap_as_background_process -from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream +from synapse.replication.tcp.streams._base import DeviceListsStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -163,9 +163,7 @@ def process_replication_rows( ) -> None: if stream_name == DeviceListsStream.NAME: self._invalidate_caches_for_devices(token, rows) - elif stream_name == UserSignatureStream.NAME: - for row in rows: - self._user_signature_stream_cache.entity_has_changed(row.user_id, token) + return super().process_replication_rows(stream_name, instance_name, token, rows) def process_replication_position( @@ -173,14 +171,17 @@ def process_replication_position( ) -> None: if stream_name == DeviceListsStream.NAME: self._device_list_id_gen.advance(instance_name, token) - elif stream_name == UserSignatureStream.NAME: - self._device_list_id_gen.advance(instance_name, token) + super().process_replication_position(stream_name, instance_name, token) def _invalidate_caches_for_devices( self, token: int, rows: Iterable[DeviceListsStream.DeviceListsStreamRow] ) -> None: for row in rows: + if row.is_signature: + self._user_signature_stream_cache.entity_has_changed(row.entity, token) + continue + # The entities are either user IDs (starting with '@') whose devices # have changed, or remote servers that we need to tell about # changes. From 316590d1ea273115a9e7925236e02d577a231de4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 17 Jan 2023 09:58:22 +0000 Subject: [PATCH 029/344] Fix bug in `wait_for_stream_position` (#14856) We were incorrectly checking if the *local* token had been advanced, rather than the token for the remote instance. In practice, I don't think this has caused any bugs due to where we use `wait_for_stream_position`, as critically we don't use it on instances that also write to the given streams (and so the local token will lag behind all remote tokens). --- changelog.d/14856.misc | 1 + synapse/replication/tcp/client.py | 2 +- tests/replication/tcp/test_handler.py | 78 +++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14856.misc diff --git a/changelog.d/14856.misc b/changelog.d/14856.misc new file mode 100644 index 000000000000..3731d6cbf184 --- /dev/null +++ b/changelog.d/14856.misc @@ -0,0 +1 @@ +Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 31022ce5fb41..322d695bc7f0 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -325,7 +325,7 @@ async def wait_for_stream_position( # anyway in that case we don't need to wait. 
return - current_position = self._streams[stream_name].current_token(self._instance_name) + current_position = self._streams[stream_name].current_token(instance_name) if position <= current_position: # We're already past the position return diff --git a/tests/replication/tcp/test_handler.py b/tests/replication/tcp/test_handler.py index 1e299d2d67ea..555922409d13 100644 --- a/tests/replication/tcp/test_handler.py +++ b/tests/replication/tcp/test_handler.py @@ -12,6 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. +from twisted.internet import defer + +from synapse.replication.tcp.commands import PositionCommand, RdataCommand + from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -71,3 +75,77 @@ def test_non_background_worker_not_subscribed_to_user_ip(self) -> None: self.assertEqual( len(self._redis_server._subscribers_by_channel[b"test/USER_IP"]), 1 ) + + def test_wait_for_stream_position(self) -> None: + """Check that wait for stream position correctly waits for an update from the + correct instance. + """ + store = self.hs.get_datastores().main + cmd_handler = self.hs.get_replication_command_handler() + data_handler = self.hs.get_replication_data_handler() + + worker1 = self.make_worker_hs( + "synapse.app.generic_worker", + extra_config={ + "worker_name": "worker1", + "run_background_tasks_on": "worker1", + "redis": {"enabled": True}, + }, + ) + + cache_id_gen = worker1.get_datastores().main._cache_id_gen + assert cache_id_gen is not None + + self.replicate() + + # First, make sure the master knows that `worker1` exists. + initial_token = cache_id_gen.get_current_token() + cmd_handler.send_command( + PositionCommand("caches", "worker1", initial_token, initial_token) + ) + self.replicate() + + # Next send out a normal RDATA, and check that waiting for that stream + # ID returns immediately. + ctx = cache_id_gen.get_next() + next_token = self.get_success(ctx.__aenter__()) + self.get_success(ctx.__aexit__(None, None, None)) + + cmd_handler.send_command( + RdataCommand("caches", "worker1", next_token, ("func_name", [], 0)) + ) + self.replicate() + + self.get_success( + data_handler.wait_for_stream_position("worker1", "caches", next_token) + ) + + # `wait_for_stream_position` should only return once master receives an + # RDATA from the worker + ctx = cache_id_gen.get_next() + next_token = self.get_success(ctx.__aenter__()) + self.get_success(ctx.__aexit__(None, None, None)) + + d = defer.ensureDeferred( + data_handler.wait_for_stream_position("worker1", "caches", next_token) + ) + self.assertFalse(d.called) + + # ... updating the cache ID gen on the master still shouldn't cause the + # deferred to wake up. + ctx = store._cache_id_gen.get_next() + self.get_success(ctx.__aenter__()) + self.get_success(ctx.__aexit__(None, None, None)) + + d = defer.ensureDeferred( + data_handler.wait_for_stream_position("worker1", "caches", next_token) + ) + self.assertFalse(d.called) + + # ... but receiving the RDATA should + cmd_handler.send_command( + RdataCommand("caches", "worker1", next_token, ("func_name", [], 0)) + ) + self.replicate() + + self.assertTrue(d.called) From 5b3af1c7d0c5a8901fada7648136f186726fd135 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 17 Jan 2023 12:44:15 +0000 Subject: [PATCH 030/344] Stabilise serving partial join responses (#14839) Serving partial join responses is no longer experimental. 
They will only be served under the stable identifier if the the undocumented config flag experimental.msc3706_enabled is set to true. Synapse continues to request a partial join only if the undocumented config flag experimental.faster_joins is set to true; this setting remains present and unaffected. --- changelog.d/14839.feature | 1 + .../conf/workers-shared-extra.yaml.j2 | 2 -- synapse/config/experimental.py | 6 +++++- .../federation/transport/server/federation.py | 21 +++++++++---------- tests/federation/test_federation_server.py | 3 +-- 5 files changed, 17 insertions(+), 16 deletions(-) create mode 100644 changelog.d/14839.feature diff --git a/changelog.d/14839.feature b/changelog.d/14839.feature new file mode 100644 index 000000000000..a4206be007dd --- /dev/null +++ b/changelog.d/14839.feature @@ -0,0 +1 @@ +Faster joins: always serve a partial join response to servers that request it with the stable query param. diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index 1170694df501..7e9ec23808c6 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -94,8 +94,6 @@ allow_device_name_lookup_over_federation: true experimental_features: # Enable history backfilling support msc2716_enabled: true - # server-side support for partial state in /send_join responses - msc3706_enabled: true {% if not workers_in_use %} # client-side support for partial state in /send_join responses faster_joins: true diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 72a17e0616d5..0444ef82441e 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -75,11 +75,15 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: ) # MSC3706 (server-side support for partial state in /send_join responses) + # Synapse will always serve partial state responses to requests using the stable + # query parameter `omit_members`. If this flag is set, Synapse will also serve + # partial state responses to requests using the unstable query parameter + # `org.matrix.msc3706.partial_state`. self.msc3706_enabled: bool = experimental.get("msc3706_enabled", False) # experimental support for faster joins over federation # (MSC2775, MSC3706, MSC3895) - # requires a target server with msc3706_enabled enabled. + # requires a target server that can provide a partial join response (MSC3706) self.faster_joins_enabled: bool = experimental.get("faster_joins", False) # MSC3720 (Account status endpoint) diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index c0a700905b8b..17c427387e03 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -422,7 +422,7 @@ def __init__( server_name: str, ): super().__init__(hs, authenticator, ratelimiter, server_name) - self._msc3706_enabled = hs.config.experimental.msc3706_enabled + self._read_msc3706_query_param = hs.config.experimental.msc3706_enabled async def on_PUT( self, @@ -436,16 +436,15 @@ async def on_PUT( # match those given in content partial_state = False - if self._msc3706_enabled: - # The stable query parameter wins, if it disagrees with the unstable - # parameter for some reason. 
- stable_param = parse_boolean_from_args(query, "omit_members", default=None) - if stable_param is not None: - partial_state = stable_param - else: - partial_state = parse_boolean_from_args( - query, "org.matrix.msc3706.partial_state", default=False - ) + # The stable query parameter wins, if it disagrees with the unstable + # parameter for some reason. + stable_param = parse_boolean_from_args(query, "omit_members", default=None) + if stable_param is not None: + partial_state = stable_param + elif self._read_msc3706_query_param: + partial_state = parse_boolean_from_args( + query, "org.matrix.msc3706.partial_state", default=False + ) result = await self.handler.on_send_join_request( origin, content, room_id, caller_supports_partial_state=partial_state diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 27770304be43..be719e49c0ca 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -211,9 +211,8 @@ def test_send_join(self): ) self.assertEqual(r[("m.room.member", joining_user)].membership, "join") - @override_config({"experimental_features": {"msc3706_enabled": True}}) def test_send_join_partial_state(self) -> None: - """When MSC3706 support is enabled, /send_join should return partial state""" + """/send_join should return partial state, if requested""" joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME join_result = self._make_join(joining_user) From b88cfe6d41ede17bde4144ca12f9dc9d0ef21215 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 17 Jan 2023 18:04:44 +0000 Subject: [PATCH 031/344] Require poetry>=1.3.2 (#14860) * Upgrade to new lockfile format Now requires poetry >= 1.2.2 to read and poetry >= 1.3.0 to write. Cheat sheet: ``` poetry --version poetry show > scratch/before pipx upgrade poetry poetry --version poetry show > scratch/after diff scratch{before,after} && echo "no change!" ``` * Use Poetry 1.3.2 when reading or writing lockfile * Remove unneeded(?) poetry dep for cibuildwheel * Update docs * Remove redundant call to setup-python * Remove outdated comments related to Poetry 1.x * Remove outdated docs line was fixed in #13082 * Minor improvements to poetry cheat sheet * Invoke setup-python-poetry with explicit version Not sure about this. It's hardcoding versions everywhere. * Changelog * Check the lockfile is version 2.0 Might one day incorporate other checks like #14742 * Typo fixes, thanks Sean Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- .ci/scripts/check_lockfile.py | 23 + .ci/scripts/prepare_old_deps.sh | 2 +- .github/workflows/latest_deps.yml | 2 +- .github/workflows/release-artifacts.yml | 2 +- .github/workflows/tests.yml | 21 +- .github/workflows/twisted_trunk.yml | 2 +- changelog.d/14860.removal | 1 + debian/build_virtualenv | 3 +- debian/changelog | 6 + docker/Dockerfile | 12 +- docs/development/contributing_guide.md | 2 +- docs/development/dependencies.md | 17 +- docs/upgrade.md | 7 + poetry.lock | 2941 ++++++++++++----------- 14 files changed, 1548 insertions(+), 1493 deletions(-) create mode 100755 .ci/scripts/check_lockfile.py create mode 100644 changelog.d/14860.removal diff --git a/.ci/scripts/check_lockfile.py b/.ci/scripts/check_lockfile.py new file mode 100755 index 000000000000..dfdc0105d58a --- /dev/null +++ b/.ci/scripts/check_lockfile.py @@ -0,0 +1,23 @@ +#! 
/usr/bin/env python +import sys + +if sys.version_info < (3, 11): + raise RuntimeError("Requires at least Python 3.11, to import tomllib") + +import tomllib + +with open("poetry.lock", "rb") as f: + lockfile = tomllib.load(f) + +try: + lock_version = lockfile["metadata"]["lock-version"] + assert lock_version == "2.0" +except Exception: + print( + """\ + Lockfile is not version 2.0. You probably need to upgrade poetry on your local box + and re-run `poetry lock --no-update`. See the Poetry cheat sheet at + https://matrix-org.github.io/synapse/develop/development/dependencies.html + """ + ) + raise diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh index 7e4f060b17b8..3398193ee565 100755 --- a/.ci/scripts/prepare_old_deps.sh +++ b/.ci/scripts/prepare_old_deps.sh @@ -53,7 +53,7 @@ with open('pyproject.toml', 'w') as f: " python3 -c "$REMOVE_DEV_DEPENDENCIES" -pip install poetry==1.2.0 +pip install poetry==1.3.2 poetry lock echo "::group::Patched pyproject.toml" diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 5ab9a8af3411..9ede9e90bf38 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -37,7 +37,7 @@ jobs: - uses: matrix-org/setup-python-poetry@v1 with: python-version: "3.x" - poetry-version: "1.2.0" + poetry-version: "1.3.2" extras: "all" # Dump installed versions for debugging. - run: poetry run pip list > before.txt diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 30ac4c157169..f540d2d28b09 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -127,7 +127,7 @@ jobs: python-version: "3.x" - name: Install cibuildwheel - run: python -m pip install cibuildwheel==2.9.0 poetry==1.2.0 + run: python -m pip install cibuildwheel==2.9.0 - name: Set up QEMU to emulate aarch64 if: matrix.arch == 'aarch64' diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5a0c0a0d65c9..28fc6d45e613 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -33,11 +33,10 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - - uses: actions/setup-python@v4 - with: - python-version: "3.x" - uses: matrix-org/setup-python-poetry@v1 with: + python-version: "3.x" + poetry-version: "1.3.2" extras: "all" - run: poetry run scripts-dev/generate_sample_config.sh --check - run: poetry run scripts-dev/config-lint.sh @@ -52,6 +51,15 @@ jobs: - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'" - run: scripts-dev/check_schema_delta.py --force-colors + check-lockfile: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: "3.x" + - run: .ci/scripts/check_lockfile.py + lint: uses: "matrix-org/backend-meta/.github/workflows/python-poetry-ci.yml@v2" with: @@ -88,6 +96,7 @@ jobs: ref: ${{ github.event.pull_request.head.sha }} - uses: matrix-org/setup-python-poetry@v1 with: + poetry-version: "1.3.2" extras: "all" - run: poetry run scripts-dev/check_pydantic_models.py @@ -163,6 +172,7 @@ jobs: - lint-pydantic - check-sampleconfig - check-schema-delta + - check-lockfile - lint-clippy - lint-rustfmt runs-on: ubuntu-latest @@ -219,6 +229,7 @@ jobs: - uses: matrix-org/setup-python-poetry@v1 with: python-version: ${{ matrix.job.python-version }} + poetry-version: "1.3.2" extras: ${{ matrix.job.extras }} - name: Await PostgreSQL if: ${{ matrix.job.postgres-version }} @@ -294,6 +305,7 @@ jobs: - uses: 
matrix-org/setup-python-poetry@v1 with: python-version: '3.7' + poetry-version: "1.3.2" extras: "all test" - run: poetry run trial -j6 tests @@ -328,6 +340,7 @@ jobs: - uses: matrix-org/setup-python-poetry@v1 with: python-version: ${{ matrix.python-version }} + poetry-version: "1.3.2" extras: ${{ matrix.extras }} - run: poetry run trial --jobs=2 tests - name: Dump logs @@ -419,6 +432,7 @@ jobs: - run: sudo apt-get -qq install xmlsec1 postgresql-client - uses: matrix-org/setup-python-poetry@v1 with: + poetry-version: "1.3.2" extras: "postgres" - run: .ci/scripts/test_export_data_command.sh env: @@ -470,6 +484,7 @@ jobs: - uses: matrix-org/setup-python-poetry@v1 with: python-version: ${{ matrix.python-version }} + poetry-version: "1.3.2" extras: "postgres" - run: .ci/scripts/test_synapse_port_db.sh id: run_tester_script diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 0a88f0cd7b6e..47fae0b79bee 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -148,7 +148,7 @@ jobs: run: | set -x DEBIAN_FRONTEND=noninteractive sudo apt-get install -yqq python3 pipx - pipx install poetry==1.2.0 + pipx install poetry==1.3.2 poetry remove -n twisted poetry add -n --extras tls git+https://github.com/twisted/twisted.git#trunk diff --git a/changelog.d/14860.removal b/changelog.d/14860.removal new file mode 100644 index 000000000000..3458d38a4825 --- /dev/null +++ b/changelog.d/14860.removal @@ -0,0 +1 @@ +Poetry 1.3.2 or higher is now required when `poetry install`ing from source. diff --git a/debian/build_virtualenv b/debian/build_virtualenv index dd97e888ba76..5fc817b60704 100755 --- a/debian/build_virtualenv +++ b/debian/build_virtualenv @@ -31,12 +31,11 @@ case $(dpkg-architecture -q DEB_HOST_ARCH) in esac # Manually install Poetry and export a pip-compatible `requirements.txt` -# We need a Poetry pre-release as the export command is buggy in < 1.2 TEMP_VENV="$(mktemp -d)" python3 -m venv "$TEMP_VENV" source "$TEMP_VENV/bin/activate" pip install -U pip -pip install poetry==1.2.0 +pip install poetry==1.3.2 poetry export \ --extras all \ --extras test \ diff --git a/debian/changelog b/debian/changelog index c4be6ac67aa3..ee7439f38f27 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.75.1) UNRELEASED; urgency=medium + + * Use Poetry 1.3.2 to manage the bundled virtualenv included with this package. + + -- Synapse Packaging team Tue, 17 Jan 2023 15:08:00 +0000 + matrix-synapse-py3 (1.75.0) stable; urgency=medium * New Synapse release 1.75.0. diff --git a/docker/Dockerfile b/docker/Dockerfile index 24a14db9cfdc..b2ec00591721 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -17,14 +17,8 @@ # Irritatingly, there is no blessed guide on how to distribute an application with its # poetry-managed environment in a docker image. We have opted for -# `poetry export | pip install -r /dev/stdin`, but there are known bugs in -# in `poetry export` whose fixes (scheduled for poetry 1.2) have yet to be released. -# In case we get bitten by those bugs in the future, the recommendations here might -# be useful: -# https://github.com/python-poetry/poetry/discussions/1879#discussioncomment-216865 -# https://stackoverflow.com/questions/53835198/integrating-python-poetry-with-docker?answertab=scoredesc - - +# `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in +# in `poetry export` in the past. 
ARG PYTHON_VERSION=3.9 @@ -49,7 +43,7 @@ RUN \ # We install poetry in its own build stage to avoid its dependencies conflicting with # synapse's dependencies. RUN --mount=type=cache,target=/root/.cache/pip \ - pip install --user "poetry==1.2.0" + pip install --user "poetry==1.3.2" WORKDIR /synapse diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 4c1067671482..3cbfe9698762 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -67,7 +67,7 @@ pipx install poetry but see poetry's [installation instructions](https://python-poetry.org/docs/#installation) for other installation methods. -Synapse requires Poetry version 1.2.0 or later. +Developing Synapse requires Poetry version 1.3.2 or later. Next, open a terminal and install dependencies as follows: diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index 8474525480d6..b734cc5826df 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -2,6 +2,13 @@ This is a quick cheat sheet for developers on how to use [`poetry`](https://python-poetry.org/). +# Installing + +See the [contributing guide](contributing_guide.md#4-install-the-dependencies). + +Developers should use Poetry 1.3.2 or higher. If you encounter problems related +to poetry, please [double-check your poetry version](#check-the-version-of-poetry-with-poetry---version). + # Background Synapse uses a variety of third-party Python packages to function as a homeserver. @@ -123,7 +130,7 @@ context of poetry's venv, without having to run `poetry shell` beforehand. ## ...reset my venv to the locked environment? ```shell -poetry install --extras all --remove-untracked +poetry install --all-extras --sync ``` ## ...delete everything and start over from scratch? @@ -183,7 +190,6 @@ Either: - manually update `pyproject.toml`; then `poetry lock --no-update`; or else - `poetry add packagename`. See `poetry add --help`; note the `--dev`, `--extras` and `--optional` flags in particular. - - **NB**: this specifies the new package with a version given by a "caret bound". This won't get forced to its lowest version in the old deps CI job: see [this TODO](https://github.com/matrix-org/synapse/blob/4e1374373857f2f7a911a31c50476342d9070681/.ci/scripts/test_old_deps.sh#L35-L39). Include the updated `pyproject.toml` and `poetry.lock` files in your commit. @@ -196,7 +202,7 @@ poetry remove packagename ``` ought to do the trick. Alternatively, manually update `pyproject.toml` and -`poetry lock --no-update`. Include the updated `pyproject.toml` and poetry.lock` +`poetry lock --no-update`. Include the updated `pyproject.toml` and `poetry.lock` files in your commit. ## ...update the version range for an existing dependency? @@ -240,9 +246,6 @@ poetry export --extras all Be wary of bugs in `poetry export` and `pip install -r requirements.txt`. -Note: `poetry export` will be made a plugin in Poetry 1.2. Additional config may -be required. - ## ...build a test wheel? I usually use @@ -260,7 +263,7 @@ doesn't require poetry. (It's what we use in CI too). However, you could try ## Check the version of poetry with `poetry --version`. -The minimum version of poetry supported by Synapse is 1.2. +The minimum version of poetry supported by Synapse is 1.3.2. It can also be useful to check the version of `poetry-core` in use. 
If you've installed `poetry` with `pipx`, try `pipx runpip poetry list | grep diff --git a/docs/upgrade.md b/docs/upgrade.md index 270c33b6562e..0d486a3c8200 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -100,6 +100,13 @@ and vice versa. Once all workers are upgraded to v1.76 (or downgraded to v1.75), account data and device replication will resume as normal. +## Minimum version of Poetry is now 1.3.2 + +The minimum supported version of Poetry is now 1.3.2 (previously 1.2.0, [since +Synapse 1.67](#upgrading-to-v1670)). If you have used `poetry install` to +install Synapse from a source checkout, you should upgrade poetry: see its +[installation instructions](https://python-poetry.org/docs/#installation). +For all other installation methods, no acction is required. # Upgrading to v1.74.0 diff --git a/poetry.lock b/poetry.lock index f148eaf8cf12..dffe1e0f7635 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,3 +1,5 @@ +# This file is automatically @generated by Poetry and should not be changed by hand. + [[package]] name = "attrs" version = "22.2.0" @@ -5,6 +7,10 @@ description = "Classes Without Boilerplate" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, + {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, +] [package.extras] cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] @@ -20,6 +26,10 @@ description = "The ultimate Python library in building OAuth and OpenID Connect category = "main" optional = true python-versions = "*" +files = [ + {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"}, + {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"}, +] [package.dependencies] cryptography = ">=3.2" @@ -31,6 +41,10 @@ description = "Self-service finite-state machines for the programmer on the go." 
category = "main" optional = false python-versions = "*" +files = [ + {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = "sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"}, + {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"}, +] [package.dependencies] attrs = ">=19.2.0" @@ -46,6 +60,29 @@ description = "Modern password hashing for your software and your servers" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"}, + {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"}, + {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"}, + {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, +] [package.extras] tests = ["pytest (>=3.2.1,!=3.3.0)"] @@ -58,6 +95,20 @@ description = "The uncompromising code formatter." category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, +] [package.dependencies] click = ">=8.0.0" @@ -81,6 +132,10 @@ description = "An easy safelist-based HTML-sanitizing tool." category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"}, + {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"}, +] [package.dependencies] six = ">=1.9.0" @@ -97,6 +152,10 @@ description = "Canonical JSON" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"}, + {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"}, +] [package.dependencies] simplejson = ">=3.14.0" @@ -112,6 +171,10 @@ description = "Python package for providing Mozilla's CA Bundle." 
category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, +] [[package]] name = "cffi" @@ -120,6 +183,72 @@ description = "Foreign Function Interface for Python calling C code." category = "main" optional = false python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = 
"cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = 
"sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] [package.dependencies] pycparser = "*" @@ -131,6 +260,10 @@ description = "The Real First Universal Charset Detector. 
Open, modern and activ category = "main" optional = false python-versions = ">=3.5.0" +files = [ + {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, + {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, +] [package.extras] unicode-backport = ["unicodedata2"] @@ -142,6 +275,10 @@ description = "Composable command line interface toolkit" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] [package.dependencies] colorama = {version = "*", markers = "platform_system == \"Windows\""} @@ -154,6 +291,9 @@ description = "Extends click.Group to invoke a command without explicit subcomma category = "dev" optional = false python-versions = "*" +files = [ + {file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"}, +] [package.dependencies] click = "*" @@ -165,6 +305,10 @@ description = "Cross-platform colored terminal text." category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, + {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, +] [[package]] name = "commonmark" @@ -173,6 +317,10 @@ description = "Python parser for the CommonMark Markdown spec" category = "dev" optional = false python-versions = "*" +files = [ + {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, + {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, +] [package.extras] test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] @@ -184,6 +332,10 @@ description = "Symbolic constants in Python" category = "main" optional = false python-versions = "*" +files = [ + {file = "constantly-15.1.0-py2.py3-none-any.whl", hash = "sha256:dd2fa9d6b1a51a83f0d7dd76293d734046aa176e384bf6e33b7e44880eb37c5d"}, + {file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"}, +] [[package]] name = "cryptography" @@ -192,6 +344,34 @@ description = "cryptography is a package which provides cryptographic recipes an category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"}, + {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"}, + {file = 
"cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"}, + {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"}, + {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"}, + {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"}, + {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"}, + {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"}, + {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"}, + {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"}, + {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"}, + {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"}, + {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"}, + {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"}, +] [package.dependencies] cffi = ">=1.12" @@ -211,6 +391,10 @@ description = "XML bomb protection for Python stdlib modules" 
category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] [[package]] name = "deprecated" @@ -219,6 +403,10 @@ description = "Python @deprecated decorator to deprecate old python classes, fun category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, + {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"}, +] [package.dependencies] wrapt = ">=1.10,<2" @@ -233,6 +421,10 @@ description = "Docutils -- Python Documentation Utilities" category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"}, + {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"}, +] [[package]] name = "elementpath" @@ -241,6 +433,10 @@ description = "XPath 1.0/2.0 parsers and selectors for ElementTree and lxml" category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "elementpath-2.5.0-py3-none-any.whl", hash = "sha256:2a432775e37a19e4362443078130a7dbfc457d7d093cd421c03958d9034cc08b"}, + {file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"}, +] [package.extras] dev = ["Sphinx", "coverage", "flake8", "lxml", "memory-profiler", "mypy (==0.910)", "tox", "xmlschema (>=1.8.0)"] @@ -252,6 +448,25 @@ description = "A simple immutable dictionary" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"}, + {file = "frozendict-2.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c9887179a245a66a50f52afa08d4d92ae0f269839fab82285c70a0fa0dd782"}, + {file = "frozendict-2.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:b98a0d65a59af6da03f794f90b0c3085a7ee14e7bf8f0ef36b079ee8aa992439"}, + {file = "frozendict-2.3.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d8042b7dab5e992e30889c9b71b781d5feef19b372d47d735e4d7d45846fd4a"}, + {file = "frozendict-2.3.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a6d2e8b7cf6b6e5677a1a4b53b4073e5d9ec640d1db30dc679627668d25e90"}, + {file = "frozendict-2.3.4-cp36-cp36m-win_amd64.whl", hash = "sha256:dbbe1339ac2646523e0bb00d1896085d1f70de23780e4927ca82b36ab8a044d3"}, + {file = "frozendict-2.3.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95bac22f7f09d81f378f2b3f672b7a50a974ca180feae1507f5e21bc147e8bc8"}, + {file = "frozendict-2.3.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae686722c144b333c4dbdc16323a5de11406d26b76d2be1cc175f90afacb5ba"}, + {file = "frozendict-2.3.4-cp37-cp37m-win_amd64.whl", hash = "sha256:389f395a74eb16992217ac1521e689c1dea2d70113bcb18714669ace1ed623b9"}, + {file = "frozendict-2.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:ccb6450a416c9cc9acef7683e637e28356e3ceeabf83521f74cc2718883076b7"}, + {file = "frozendict-2.3.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca59108b77cadc13ba7dfea7e8f50811208c7652a13dc6c7f92d7782a24d299"}, + {file = "frozendict-2.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:3ec86ebf143dd685184215c27ec416c36e0ba1b80d81b1b9482f7d380c049b4e"}, + {file = "frozendict-2.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5809e6ff6b7257043a486f7a3b73a7da71cf69a38980b4171e4741291d0d9eb3"}, + {file = "frozendict-2.3.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c550ed7fdf1962984bec21630c584d722b3ee5d5f57a0ae2527a0121dc0414a"}, + {file = "frozendict-2.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:3e93aebc6e69a8ef329bbe9afb8342bd33c7b5c7a0c480cb9f7e60b0cbe48072"}, + {file = "frozendict-2.3.4-py3-none-any.whl", hash = "sha256:d722f3d89db6ae35ef35ecc243c40c800eb344848c83dba4798353312cd37b15"}, + {file = "frozendict-2.3.4.tar.gz", hash = "sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78"}, +] [[package]] name = "gitdb" @@ -260,6 +475,10 @@ description = "Git Object Database" category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, + {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"}, +] [package.dependencies] smmap = ">=3.0.1,<6" @@ -271,6 +490,10 @@ description = "GitPython is a python library used to interact with Git repositor category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"}, + {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"}, +] [package.dependencies] gitdb = ">=4.0.1,<5" @@ -283,6 +506,49 @@ description = "Python wrapper for hiredis" category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"}, + {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26"}, + {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:3d55e36715ff06cdc0ab62f9591607c4324297b6b6ce5b58cb9928b3defe30ea"}, + {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:5d2a48c80cf5a338d58aae3c16872f4d452345e18350143b3bf7216d33ba7b99"}, + {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:240ce6dc19835971f38caf94b5738092cb1e641f8150a9ef9251b7825506cb05"}, + {file = "hiredis-2.0.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5dc7a94bb11096bc4bffd41a3c4f2b958257085c01522aa81140c68b8bf1630a"}, + {file = "hiredis-2.0.0-cp36-cp36m-win32.whl", hash = "sha256:139705ce59d94eef2ceae9fd2ad58710b02aee91e7fa0ccb485665ca0ecbec63"}, + {file = "hiredis-2.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c39c46d9e44447181cd502a35aad2bb178dbf1b1f86cf4db639d7b9614f837c6"}, + {file = "hiredis-2.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:adf4dd19d8875ac147bf926c727215a0faf21490b22c053db464e0bf0deb0485"}, + {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0f41827028901814c709e744060843c77e78a3aca1e0d6875d2562372fcb405a"}, + 
{file = "hiredis-2.0.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:508999bec4422e646b05c95c598b64bdbef1edf0d2b715450a078ba21b385bcc"}, + {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:0d5109337e1db373a892fdcf78eb145ffb6bbd66bb51989ec36117b9f7f9b579"}, + {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:04026461eae67fdefa1949b7332e488224eac9e8f2b5c58c98b54d29af22093e"}, + {file = "hiredis-2.0.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a00514362df15af041cc06e97aebabf2895e0a7c42c83c21894be12b84402d79"}, + {file = "hiredis-2.0.0-cp37-cp37m-win32.whl", hash = "sha256:09004096e953d7ebd508cded79f6b21e05dff5d7361771f59269425108e703bc"}, + {file = "hiredis-2.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f8196f739092a78e4f6b1b2172679ed3343c39c61a3e9d722ce6fcf1dac2824a"}, + {file = "hiredis-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:294a6697dfa41a8cba4c365dd3715abc54d29a86a40ec6405d677ca853307cfb"}, + {file = "hiredis-2.0.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3dddf681284fe16d047d3ad37415b2e9ccdc6c8986c8062dbe51ab9a358b50a5"}, + {file = "hiredis-2.0.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:dcef843f8de4e2ff5e35e96ec2a4abbdf403bd0f732ead127bd27e51f38ac298"}, + {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:87c7c10d186f1743a8fd6a971ab6525d60abd5d5d200f31e073cd5e94d7e7a9d"}, + {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:7f0055f1809b911ab347a25d786deff5e10e9cf083c3c3fd2dd04e8612e8d9db"}, + {file = "hiredis-2.0.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11d119507bb54e81f375e638225a2c057dda748f2b1deef05c2b1a5d42686048"}, + {file = "hiredis-2.0.0-cp38-cp38-win32.whl", hash = "sha256:7492af15f71f75ee93d2a618ca53fea8be85e7b625e323315169977fae752426"}, + {file = "hiredis-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:65d653df249a2f95673976e4e9dd7ce10de61cfc6e64fa7eeaa6891a9559c581"}, + {file = "hiredis-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8427a5e9062ba66fc2c62fb19a72276cf12c780e8db2b0956ea909c48acff5"}, + {file = "hiredis-2.0.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:3f5f7e3a4ab824e3de1e1700f05ad76ee465f5f11f5db61c4b297ec29e692b2e"}, + {file = "hiredis-2.0.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e3447d9e074abf0e3cd85aef8131e01ab93f9f0e86654db7ac8a3f73c63706ce"}, + {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:8b42c0dc927b8d7c0eb59f97e6e34408e53bc489f9f90e66e568f329bff3e443"}, + {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:b84f29971f0ad4adaee391c6364e6f780d5aae7e9226d41964b26b49376071d0"}, + {file = "hiredis-2.0.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:0b39ec237459922c6544d071cdcf92cbb5bc6685a30e7c6d985d8a3e3a75326e"}, + {file = "hiredis-2.0.0-cp39-cp39-win32.whl", hash = "sha256:a7928283143a401e72a4fad43ecc85b35c27ae699cf5d54d39e1e72d97460e1d"}, + {file = "hiredis-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:a4ee8000454ad4486fb9f28b0cab7fa1cd796fc36d639882d0b34109b5b3aec9"}, + {file = "hiredis-2.0.0-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1f03d4dadd595f7a69a75709bc81902673fa31964c75f93af74feac2f134cc54"}, + {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:04927a4c651a0e9ec11c68e4427d917e44ff101f761cd3b5bc76f86aaa431d27"}, + {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a39efc3ade8c1fb27c097fd112baf09d7fd70b8cb10ef1de4da6efbe066d381d"}, + {file = 
"hiredis-2.0.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:07bbf9bdcb82239f319b1f09e8ef4bdfaec50ed7d7ea51a56438f39193271163"}, + {file = "hiredis-2.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:807b3096205c7cec861c8803a6738e33ed86c9aae76cac0e19454245a6bbbc0a"}, + {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:1233e303645f468e399ec906b6b48ab7cd8391aae2d08daadbb5cad6ace4bd87"}, + {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:cb2126603091902767d96bcb74093bd8b14982f41809f85c9b96e519c7e1dc41"}, + {file = "hiredis-2.0.0-pp37-pypy37_pp73-win32.whl", hash = "sha256:f52010e0a44e3d8530437e7da38d11fb822acfb0d5b12e9cd5ba655509937ca0"}, + {file = "hiredis-2.0.0.tar.gz", hash = "sha256:81d6d8e39695f2c37954d1011c0480ef7cf444d4e3ae24bc5e89ee5de360139a"}, +] [[package]] name = "hyperlink" @@ -291,6 +557,10 @@ description = "A featureful, immutable, and correct URL for Python." category = "main" optional = false python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "hyperlink-21.0.0-py2.py3-none-any.whl", hash = "sha256:e6b14c37ecb73e89c77d78cdb4c2cc8f3fb59a885c5b3f819ff4ed80f25af1b4"}, + {file = "hyperlink-21.0.0.tar.gz", hash = "sha256:427af957daa58bc909471c6c40f74c5450fa123dd093fc53efd2e91d2705a56b"}, +] [package.dependencies] idna = ">=2.5" @@ -302,6 +572,10 @@ description = "Internationalized Domain Names in Applications (IDNA)" category = "main" optional = false python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] [[package]] name = "ijson" @@ -310,15 +584,83 @@ description = "Iterative JSON parser with standard Python iterator interfaces" category = "main" optional = false python-versions = "*" - -[[package]] -name = "importlib-metadata" -version = "6.0.0" -description = "Read metadata from Python packages" -category = "main" -optional = false -python-versions = ">=3.7" - +files = [ + {file = "ijson-3.1.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6c1a777096be5f75ffebb335c6d2ebc0e489b231496b7f2ca903aa061fe7d381"}, + {file = "ijson-3.1.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:475fc25c3d2a86230b85777cae9580398b42eed422506bf0b6aacfa936f7bfcd"}, + {file = "ijson-3.1.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f587699b5a759e30accf733e37950cc06c4118b72e3e146edcea77dded467426"}, + {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:339b2b4c7bbd64849dd69ef94ee21e29dcd92c831f47a281fdd48122bb2a715a"}, + {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:446ef8980504da0af8d20d3cb6452c4dc3d8aa5fd788098985e899b913191fe6"}, + {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:3997a2fdb28bc04b9ab0555db5f3b33ed28d91e9d42a3bf2c1842d4990beb158"}, + {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:fa10a1d88473303ec97aae23169d77c5b92657b7fb189f9c584974c00a79f383"}, + {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:9a5bf5b9d8f2ceaca131ee21fc7875d0f34b95762f4f32e4d65109ca46472147"}, + {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:81cc8cee590c8a70cca3c9aefae06dd7cb8e9f75f3a7dc12b340c2e332d33a2a"}, + {file = "ijson-3.1.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:4ea5fc50ba158f72943d5174fbc29ebefe72a2adac051c814c87438dc475cf78"}, + {file = "ijson-3.1.4-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:3b98861a4280cf09d267986cefa46c3bd80af887eae02aba07488d80eb798afa"}, + {file = "ijson-3.1.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:068c692efba9692406b86736dcc6803e4a0b6280d7f0b7534bff3faec677ff38"}, + {file = "ijson-3.1.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:86884ac06ac69cea6d89ab7b84683b3b4159c4013e4a20276d3fc630fe9b7588"}, + {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:41e5886ff6fade26f10b87edad723d2db14dcbb1178717790993fcbbb8ccd333"}, + {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:24b58933bf777d03dc1caa3006112ec7f9e6f6db6ffe1f5f5bd233cb1281f719"}, + {file = "ijson-3.1.4-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:13f80aad0b84d100fb6a88ced24bade21dc6ddeaf2bba3294b58728463194f50"}, + {file = "ijson-3.1.4-cp35-cp35m-win32.whl", hash = "sha256:fa9a25d0bd32f9515e18a3611690f1de12cb7d1320bd93e9da835936b41ad3ff"}, + {file = "ijson-3.1.4-cp35-cp35m-win_amd64.whl", hash = "sha256:c4c1bf98aaab4c8f60d238edf9bcd07c896cfcc51c2ca84d03da22aad88957c5"}, + {file = "ijson-3.1.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f0f2a87c423e8767368aa055310024fa28727f4454463714fef22230c9717f64"}, + {file = "ijson-3.1.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:15507de59d74d21501b2a076d9c49abf927eb58a51a01b8f28a0a0565db0a99f"}, + {file = "ijson-3.1.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2e6bd6ad95ab40c858592b905e2bbb4fe79bbff415b69a4923dafe841ffadcb4"}, + {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:68e295bb12610d086990cedc89fb8b59b7c85740d66e9515aed062649605d0bf"}, + {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3bb461352c0f0f2ec460a4b19400a665b8a5a3a2da663a32093df1699642ee3f"}, + {file = "ijson-3.1.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f91c75edd6cf1a66f02425bafc59a22ec29bc0adcbc06f4bfd694d92f424ceb3"}, + {file = "ijson-3.1.4-cp36-cp36m-win32.whl", hash = "sha256:4c53cc72f79a4c32d5fc22efb85aa22f248e8f4f992707a84bdc896cc0b1ecf9"}, + {file = "ijson-3.1.4-cp36-cp36m-win_amd64.whl", hash = "sha256:ac9098470c1ff6e5c23ec0946818bc102bfeeeea474554c8d081dc934be20988"}, + {file = "ijson-3.1.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dcd6f04df44b1945b859318010234651317db2c4232f75e3933f8bb41c4fa055"}, + {file = "ijson-3.1.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:5a2f40c053c837591636dc1afb79d85e90b9a9d65f3d9963aae31d1eb11bfed2"}, + {file = "ijson-3.1.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f50337e3b8e72ec68441b573c2848f108a8976a57465c859b227ebd2a2342901"}, + {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:454918f908abbed3c50a0a05c14b20658ab711b155e4f890900e6f60746dd7cc"}, + {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:387c2ec434cc1bc7dc9bd33ec0b70d95d443cc1e5934005f26addc2284a437ab"}, + {file = "ijson-3.1.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:179ed6fd42e121d252b43a18833df2de08378fac7bce380974ef6f5e522afefa"}, + {file = "ijson-3.1.4-cp37-cp37m-win32.whl", hash = "sha256:26a6a550b270df04e3f442e2bf0870c9362db4912f0e7bdfd300f30ea43115a2"}, + {file = "ijson-3.1.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ff8cf7507d9d8939264068c2cff0a23f99703fa2f31eb3cb45a9a52798843586"}, + {file = "ijson-3.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:09c9d7913c88a6059cd054ff854958f34d757402b639cf212ffbec201a705a0d"}, + {file = "ijson-3.1.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:702ba9a732116d659a5e950ee176be6a2e075998ef1bcde11cbf79a77ed0f717"}, + {file = "ijson-3.1.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:667841591521158770adc90793c2bdbb47c94fe28888cb802104b8bbd61f3d51"}, + {file = "ijson-3.1.4-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:df641dd07b38c63eecd4f454db7b27aa5201193df160f06b48111ba97ab62504"}, + {file = "ijson-3.1.4-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:9348e7d507eb40b52b12eecff3d50934fcc3d2a15a2f54ec1127a36063b9ba8f"}, + {file = "ijson-3.1.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:93455902fdc33ba9485c7fae63ac95d96e0ab8942224a357113174bbeaff92e9"}, + {file = "ijson-3.1.4-cp38-cp38-win32.whl", hash = "sha256:5b725f2e984ce70d464b195f206fa44bebbd744da24139b61fec72de77c03a16"}, + {file = "ijson-3.1.4-cp38-cp38-win_amd64.whl", hash = "sha256:a5965c315fbb2dc9769dfdf046eb07daf48ae20b637da95ec8d62b629be09df4"}, + {file = "ijson-3.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b8ee7dbb07cec9ba29d60cfe4954b3cc70adb5f85bba1f72225364b59c1cf82b"}, + {file = "ijson-3.1.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d9e01c55d501e9c3d686b6ee3af351c9c0c8c3e45c5576bd5601bee3e1300b09"}, + {file = "ijson-3.1.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:297f26f27a04cd0d0a2f865d154090c48ea11b239cabe0a17a6c65f0314bd1ca"}, + {file = "ijson-3.1.4-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:9239973100338a4138d09d7a4602bd289861e553d597cd67390c33bfc452253e"}, + {file = "ijson-3.1.4-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:2a64c66a08f56ed45a805691c2fd2e1caef00edd6ccf4c4e5eff02cd94ad8364"}, + {file = "ijson-3.1.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d17fd199f0d0a4ab6e0d541b4eec1b68b5bd5bb5d8104521e22243015b51049b"}, + {file = "ijson-3.1.4-cp39-cp39-win32.whl", hash = "sha256:70ee3c8fa0eba18c80c5911639c01a8de4089a4361bad2862a9949e25ec9b1c8"}, + {file = "ijson-3.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:6bf2b64304321705d03fa5e403ec3f36fa5bb27bf661849ad62e0a3a49bc23e3"}, + {file = "ijson-3.1.4-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:5d7e3fcc3b6de76a9dba1e9fc6ca23dad18f0fa6b4e6499415e16b684b2e9af1"}, + {file = "ijson-3.1.4-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:a72eb0359ebff94754f7a2f00a6efe4c57716f860fc040c606dedcb40f49f233"}, + {file = "ijson-3.1.4-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:28fc168f5faf5759fdfa2a63f85f1f7a148bbae98f34404a6ba19f3d08e89e87"}, + {file = "ijson-3.1.4-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2844d4a38d27583897ed73f7946e205b16926b4cab2525d1ce17e8b08064c706"}, + {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:252defd1f139b5fb8c764d78d5e3a6df81543d9878c58992a89b261369ea97a7"}, + {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:15d5356b4d090c699f382c8eb6a2bcd5992a8c8e8b88c88bc6e54f686018328a"}, + {file = "ijson-3.1.4-pp36-pypy36_pp73-win32.whl", hash = "sha256:6774ec0a39647eea70d35fb76accabe3d71002a8701c0545b9120230c182b75b"}, + {file = "ijson-3.1.4-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f11da15ec04cc83ff0f817a65a3392e169be8d111ba81f24d6e09236597bb28c"}, + {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:ee13ceeed9b6cf81b3b8197ef15595fc43fd54276842ed63840ddd49db0603da"}, + {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = 
"sha256:97e4df67235fae40d6195711223520d2c5bf1f7f5087c2963fcde44d72ebf448"}, + {file = "ijson-3.1.4-pp37-pypy37_pp73-win32.whl", hash = "sha256:3d10eee52428f43f7da28763bb79f3d90bbbeea1accb15de01e40a00885b6e89"}, + {file = "ijson-3.1.4.tar.gz", hash = "sha256:1d1003ae3c6115ec9b587d29dd136860a81a23c7626b682e2b5b12c9fd30e4ea"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.0.0" +description = "Read metadata from Python packages" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, + {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, +] + [package.dependencies] typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} zipp = ">=0.5" @@ -335,6 +677,10 @@ description = "Read resources from Python packages" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"}, + {file = "importlib_resources-5.4.0.tar.gz", hash = "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b"}, +] [package.dependencies] zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} @@ -350,6 +696,10 @@ description = "A small library that versions your Python projects." category = "main" optional = false python-versions = "*" +files = [ + {file = "incremental-21.3.0-py2.py3-none-any.whl", hash = "sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321"}, + {file = "incremental-21.3.0.tar.gz", hash = "sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57"}, +] [package.extras] scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] @@ -361,6 +711,10 @@ description = "A Python utility / library to sort Python imports." category = "dev" optional = false python-versions = ">=3.7.0" +files = [ + {file = "isort-5.11.4-py3-none-any.whl", hash = "sha256:c033fd0edb91000a7f09527fe5c75321878f98322a77ddcc81adbd83724afb7b"}, + {file = "isort-5.11.4.tar.gz", hash = "sha256:6db30c5ded9815d813932c04c2f85a360bcdd35fed496f4d8f35495ef0a261b6"}, +] [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] @@ -375,6 +729,9 @@ description = "Jaeger Python OpenTracing Tracer implementation" category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, +] [package.dependencies] opentracing = ">=2.1,<3.0" @@ -392,6 +749,10 @@ description = "Low-level, pure Python DBus protocol wrapper." category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"}, + {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"}, +] [package.extras] test = ["async-timeout", "pytest", "pytest-asyncio", "pytest-trio", "testpath", "trio"] @@ -404,6 +765,10 @@ description = "A very fast and expressive template engine." 
category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] [package.dependencies] MarkupSafe = ">=2.0" @@ -418,6 +783,10 @@ description = "An implementation of JSON Schema validation for Python" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, + {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, +] [package.dependencies] attrs = ">=17.4.0" @@ -438,6 +807,10 @@ description = "Store and access your passwords safely." category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"}, + {file = "keyring-23.5.0.tar.gz", hash = "sha256:9012508e141a80bd1c0b6778d5c610dd9f8c464d75ac6774248500503f972fb9"}, +] [package.dependencies] importlib-metadata = ">=3.6" @@ -456,6 +829,10 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library category = "main" optional = true python-versions = "*" +files = [ + {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, + {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, +] [package.dependencies] pyasn1 = ">=0.4.6" @@ -467,6 +844,85 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li category = "main" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, != 3.4.*" +files = [ + {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"}, + {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"}, + {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"}, + {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"}, + {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"}, + {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"}, + {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"}, + {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"}, + {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"}, + {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"}, + {file = 
"lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"}, + {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"}, + {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"}, + {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"}, + {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"}, + {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"}, + {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"}, + {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"}, + {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"}, + {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"}, + {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"}, + {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"}, + {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"}, + {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"}, + {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"}, + {file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"}, + {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"}, + {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"}, + {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = 
"sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"}, + {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"}, + {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"}, + {file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"}, + {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"}, + {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"}, + {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"}, + {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"}, + {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"}, + {file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"}, + {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"}, + {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"}, + {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"}, + {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"}, + {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"}, + {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = 
"sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"}, + {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"}, + {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"}, + {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"}, + {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"}, + {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"}, + {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"}, + {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"}, + {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"}, + {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"}, + {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"}, + {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"}, +] [package.extras] cssselect = ["cssselect (>=0.7)"] @@ -481,6 +937,48 @@ description = "Safely 
add untrusted strings to HTML/XML markup." category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"}, + {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"}, + {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"}, + {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = "sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"}, + {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"}, + {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, +] [[package]] name = "matrix-common" @@ -489,6 +987,10 @@ description = "Common utilities for Synapse, Sydent and Sygnal" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "matrix_common-1.3.0-py3-none-any.whl", hash = "sha256:524e2785b9b03be4d15f3a8a6b857c5b6af68791ffb1b9918f0ad299abc4db20"}, + {file = "matrix_common-1.3.0.tar.gz", hash = "sha256:62e121cccd9f243417b57ec37a76dc44aeb198a7a5c67afd6b8275992ff2abd1"}, +] [package.dependencies] attrs = "*" @@ -505,6 +1007,10 @@ description = "An LDAP3 auth provider for Synapse" category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = 
"sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"}, + {file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"}, +] [package.dependencies] ldap3 = ">=2.8" @@ -521,41 +1027,129 @@ description = "MessagePack serializer" category = "main" optional = false python-versions = "*" - -[[package]] -name = "mypy" -version = "0.981" -description = "Optional static typing for Python" -category = "dev" -optional = false -python-versions = ">=3.7" - -[package.dependencies] -mypy-extensions = ">=0.4.3" -tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} -typing-extensions = ">=3.10" - -[package.extras] -dmypy = ["psutil (>=4.0)"] -python2 = ["typed-ast (>=1.4.0,<2)"] -reports = ["lxml"] - -[[package]] -name = "mypy-extensions" -version = "0.4.3" -description = "Experimental type system extensions for programs checked with the mypy typechecker." -category = "dev" -optional = false -python-versions = "*" - -[[package]] -name = "mypy-zope" +files = [ + {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"}, + {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"}, + {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"}, + {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"}, + {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"}, + {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"}, + {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"}, + {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"}, + {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"}, + {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"}, + {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"}, + {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"}, + {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"}, + {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"}, + {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"}, + {file = 
"msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"}, + {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"}, + {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"}, + {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"}, + {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"}, + {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"}, + {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"}, + {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"}, + {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"}, + {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"}, + {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"}, + {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"}, + {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"}, + {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"}, + {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"}, + {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"}, + {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"}, + {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"}, + {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"}, + {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"}, + {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"}, + {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"}, + {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"}, + {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = 
"sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"}, + {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"}, + {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"}, + {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"}, + {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"}, + {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"}, + {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"}, + {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"}, + {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"}, + {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"}, + {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"}, + {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"}, + {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"}, + {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"}, +] + +[[package]] +name = "mypy" +version = "0.981" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"}, + {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"}, + {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"}, + {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"}, + {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"}, + {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"}, + {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"}, + {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"}, + {file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"}, + {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = 
"sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"}, + {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"}, + {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"}, + {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"}, + {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"}, + {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"}, + {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"}, + {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"}, + {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"}, + {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"}, + {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"}, + {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"}, + {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"}, + {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"}, + {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"}, +] + +[package.dependencies] +mypy-extensions = ">=0.4.3" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} +typing-extensions = ">=3.10" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." 
+category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] + +[[package]] +name = "mypy-zope" version = "0.3.11" description = "Plugin for mypy to support zope interfaces" category = "dev" optional = false python-versions = "*" +files = [ + {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"}, + {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"}, +] [package.dependencies] mypy = "0.981" @@ -572,6 +1166,10 @@ description = "A network address manipulation library for Python" category = "main" optional = false python-versions = "*" +files = [ + {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = "sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"}, + {file = "netaddr-0.8.0.tar.gz", hash = "sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243"}, +] [[package]] name = "opentracing" @@ -580,6 +1178,9 @@ description = "OpenTracing API for Python. See documentation at http://opentraci category = "main" optional = true python-versions = "*" +files = [ + {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, +] [package.extras] tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pytest", "pytest-cov", "pytest-mock", "six (>=1.10.0,<2.0)", "sphinx_rtd_theme", "tornado"] @@ -591,6 +1192,10 @@ description = "Core utilities for Python packages" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"}, + {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"}, +] [[package]] name = "parameterized" @@ -599,6 +1204,10 @@ description = "Parameterized testing with any Python test framework" category = "main" optional = false python-versions = "*" +files = [ + {file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"}, + {file = "parameterized-0.8.1.tar.gz", hash = "sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c"}, +] [package.extras] dev = ["jinja2"] @@ -610,6 +1219,10 @@ description = "Utility library for gitignore style pattern matching of file path category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, + {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, +] [[package]] name = "phonenumbers" @@ -618,6 +1231,10 @@ description = "Python version of Google's common library for parsing, formatting category = "main" optional = false python-versions = "*" +files = [ + {file = "phonenumbers-8.13.2-py2.py3-none-any.whl", hash = "sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf"}, + {file = "phonenumbers-8.13.2.tar.gz", hash = "sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a"}, +] 
[[package]] name = "pillow" @@ -626,6 +1243,85 @@ description = "Python Imaging Library (Fork)" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, + {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, + {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, + {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, + {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, + {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, + {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, + {file = "Pillow-9.4.0-2-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:9d9a62576b68cd90f7075876f4e8444487db5eeea0e4df3ba298ee38a8d067b0"}, + {file = "Pillow-9.4.0-2-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:87708d78a14d56a990fbf4f9cb350b7d89ee8988705e58e39bdf4d82c149210f"}, + {file = "Pillow-9.4.0-2-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8a2b5874d17e72dfb80d917213abd55d7e1ed2479f38f001f264f7ce7bae757c"}, + {file = "Pillow-9.4.0-2-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:83125753a60cfc8c412de5896d10a0a405e0bd88d0470ad82e0869ddf0cb3848"}, + {file = "Pillow-9.4.0-2-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:9e5f94742033898bfe84c93c831a6f552bb629448d4072dd312306bab3bd96f1"}, + {file = "Pillow-9.4.0-2-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:013016af6b3a12a2f40b704677f8b51f72cb007dac785a9933d5c86a72a7fe33"}, + {file = "Pillow-9.4.0-2-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:99d92d148dd03fd19d16175b6d355cc1b01faf80dae93c6c3eb4163709edc0a9"}, + {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, + {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, + {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, + {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, + {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, + {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, + {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, + {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, + {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, + {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, + {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, + {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, + {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, + {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, + {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, + {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, + {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, + {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, + {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, + {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, + {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, + {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, + {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, + {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, + {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, + {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, + {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, + {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, + {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, + {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, + {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, + {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, + {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, + {file = 
"Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, + {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, + {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, +] [package.extras] docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-issues (>=3.0.1)", "sphinx-removed-in", "sphinxext-opengraph"] @@ -638,6 +1334,10 @@ description = "Query metadatdata from sdists / bdists / installed packages." category = "dev" optional = false python-versions = "*" +files = [ + {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"}, + {file = "pkginfo-1.8.2.tar.gz", hash = "sha256:542e0d0b6750e2e21c20179803e40ab50598d8066d51097a0e382cba9eb02bff"}, +] [package.extras] testing = ["coverage", "nose"] @@ -649,6 +1349,10 @@ description = "Resolve a name to an object." category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] [[package]] name = "platformdirs" @@ -657,6 +1361,10 @@ description = "A small Python module for determining appropriate platform-specif category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"}, + {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, +] [package.extras] docs = ["Sphinx (>=4)", "furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)"] @@ -669,6 +1377,10 @@ description = "Python client for the Prometheus monitoring system." 
category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"}, + {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, +] [package.extras] twisted = ["twisted"] @@ -680,6 +1392,21 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter" category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"}, + {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"}, + {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"}, + {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"}, + {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"}, + {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"}, + {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"}, + {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = "sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"}, + {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"}, + {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"}, + {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"}, + {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"}, + {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"}, +] [[package]] name = "psycopg2cffi" @@ -688,6 +1415,9 @@ description = ".. image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas category = "main" optional = true python-versions = "*" +files = [ + {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, +] [package.dependencies] cffi = ">=1.0" @@ -700,6 +1430,9 @@ description = "A Simple library to enable psycopg2 compatability" category = "main" optional = true python-versions = "*" +files = [ + {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, +] [package.dependencies] psycopg2 = "*" @@ -711,6 +1444,10 @@ description = "ASN.1 types and codecs" category = "main" optional = false python-versions = "*" +files = [ + {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, + {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, +] [[package]] name = "pyasn1-modules" @@ -719,6 +1456,10 @@ description = "A collection of ASN.1-based protocols modules." 
category = "main" optional = false python-versions = "*" +files = [ + {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, + {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, +] [package.dependencies] pyasn1 = ">=0.4.6,<0.5.0" @@ -730,6 +1471,10 @@ description = "C parser in Python" category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] [[package]] name = "pydantic" @@ -738,21 +1483,63 @@ description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" - -[package.dependencies] -typing-extensions = ">=4.2.0" - -[package.extras] -dotenv = ["python-dotenv (>=0.10.4)"] -email = ["email-validator (>=1.0.3)"] - -[[package]] -name = "pygithub" -version = "1.57" -description = "Use the full Github API v3" -category = "dev" -optional = false -python-versions = ">=3.7" +files = [ + {file = "pydantic-1.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5635de53e6686fe7a44b5cf25fcc419a0d5e5c1a1efe73d49d48fe7586db854"}, + {file = "pydantic-1.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dc1cc241440ed7ca9ab59d9929075445da6b7c94ced281b3dd4cfe6c8cff817"}, + {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51bdeb10d2db0f288e71d49c9cefa609bca271720ecd0c58009bd7504a0c464c"}, + {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78cec42b95dbb500a1f7120bdf95c401f6abb616bbe8785ef09887306792e66e"}, + {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8775d4ef5e7299a2f4699501077a0defdaac5b6c4321173bcb0f3c496fbadf85"}, + {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:572066051eeac73d23f95ba9a71349c42a3e05999d0ee1572b7860235b850cc6"}, + {file = "pydantic-1.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:7feb6a2d401f4d6863050f58325b8d99c1e56f4512d98b11ac64ad1751dc647d"}, + {file = "pydantic-1.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39f4a73e5342b25c2959529f07f026ef58147249f9b7431e1ba8414a36761f53"}, + {file = "pydantic-1.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:983e720704431a6573d626b00662eb78a07148c9115129f9b4351091ec95ecc3"}, + {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d52162fe6b2b55964fbb0af2ee58e99791a3138588c482572bb6087953113a"}, + {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdf8d759ef326962b4678d89e275ffc55b7ce59d917d9f72233762061fd04a2d"}, + {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05a81b006be15655b2a1bae5faa4280cf7c81d0e09fcb49b342ebf826abe5a72"}, + {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d88c4c0e5c5dfd05092a4b271282ef0588e5f4aaf345778056fc5259ba098857"}, + {file = "pydantic-1.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:6a05a9db1ef5be0fe63e988f9617ca2551013f55000289c671f71ec16f4985e3"}, + {file = 
"pydantic-1.10.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:887ca463c3bc47103c123bc06919c86720e80e1214aab79e9b779cda0ff92a00"}, + {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdf88ab63c3ee282c76d652fc86518aacb737ff35796023fae56a65ced1a5978"}, + {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a48f1953c4a1d9bd0b5167ac50da9a79f6072c63c4cef4cf2a3736994903583e"}, + {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a9f2de23bec87ff306aef658384b02aa7c32389766af3c5dee9ce33e80222dfa"}, + {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cd8702c5142afda03dc2b1ee6bc358b62b3735b2cce53fc77b31ca9f728e4bc8"}, + {file = "pydantic-1.10.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6e7124d6855b2780611d9f5e1e145e86667eaa3bd9459192c8dc1a097f5e9903"}, + {file = "pydantic-1.10.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b53e1d41e97063d51a02821b80538053ee4608b9a181c1005441f1673c55423"}, + {file = "pydantic-1.10.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55b1625899acd33229c4352ce0ae54038529b412bd51c4915349b49ca575258f"}, + {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:301d626a59edbe5dfb48fcae245896379a450d04baeed50ef40d8199f2733b06"}, + {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f9d649892a6f54a39ed56b8dfd5e08b5f3be5f893da430bed76975f3735d15"}, + {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7b5a3821225f5c43496c324b0d6875fde910a1c2933d726a743ce328fbb2a8c"}, + {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f2f7eb6273dd12472d7f218e1fef6f7c7c2f00ac2e1ecde4db8824c457300416"}, + {file = "pydantic-1.10.4-cp38-cp38-win_amd64.whl", hash = "sha256:4b05697738e7d2040696b0a66d9f0a10bec0efa1883ca75ee9e55baf511909d6"}, + {file = "pydantic-1.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a9a6747cac06c2beb466064dda999a13176b23535e4c496c9d48e6406f92d42d"}, + {file = "pydantic-1.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb992a1ef739cc7b543576337bebfc62c0e6567434e522e97291b251a41dad7f"}, + {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:990406d226dea0e8f25f643b370224771878142155b879784ce89f633541a024"}, + {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e82a6d37a95e0b1b42b82ab340ada3963aea1317fd7f888bb6b9dfbf4fff57c"}, + {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9193d4f4ee8feca58bc56c8306bcb820f5c7905fd919e0750acdeeeef0615b28"}, + {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2b3ce5f16deb45c472dde1a0ee05619298c864a20cded09c4edd820e1454129f"}, + {file = "pydantic-1.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:9cbdc268a62d9a98c56e2452d6c41c0263d64a2009aac69246486f01b4f594c4"}, + {file = "pydantic-1.10.4-py3-none-any.whl", hash = "sha256:4948f264678c703f3877d1c8877c4e3b2e12e549c57795107f08cf70c6ec7774"}, + {file = "pydantic-1.10.4.tar.gz", hash = "sha256:b9a3859f24eb4e097502a3be1fb4b2abb79b6103dd9e2e0edb70613a4459a648"}, +] + +[package.dependencies] +typing-extensions = ">=4.2.0" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[[package]] +name = "pygithub" 
+version = "1.57" +description = "Use the full Github API v3" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"}, + {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"}, +] [package.dependencies] deprecated = "*" @@ -770,6 +1557,10 @@ description = "Pygments is a syntax highlighting package written in Python." category = "dev" optional = false python-versions = ">=3.5" +files = [ + {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, + {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, +] [[package]] name = "pyicu" @@ -778,6 +1569,9 @@ description = "Python extension wrapping the ICU C++ API" category = "main" optional = true python-versions = "*" +files = [ + {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"}, +] [[package]] name = "pyjwt" @@ -786,6 +1580,10 @@ description = "JSON Web Token implementation in Python" category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, + {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, +] [package.extras] crypto = ["cryptography (>=3.3.1)"] @@ -800,6 +1598,10 @@ description = "Macaroon library for Python" category = "main" optional = false python-versions = "*" +files = [ + {file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"}, + {file = "pymacaroons-0.13.0.tar.gz", hash = "sha256:1e6bba42a5f66c245adf38a5a4006a99dcc06a0703786ea636098667d42903b8"}, +] [package.dependencies] PyNaCl = ">=1.1.2,<2.0" @@ -812,6 +1614,10 @@ description = "A development tool to measure, monitor and analyze the memory beh category = "main" optional = true python-versions = ">=3.6" +files = [ + {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, + {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, +] [[package]] name = "pynacl" @@ -820,6 +1626,18 @@ description = "Python binding to the Networking and Cryptography (NaCl) library" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, + {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, + {file = 
"PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, + {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, + {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, + {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, +] [package.dependencies] cffi = ">=1.4.1" @@ -835,6 +1653,10 @@ description = "Python wrapper module around the OpenSSL library" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "pyOpenSSL-23.0.0-py3-none-any.whl", hash = "sha256:df5fc28af899e74e19fccb5510df423581047e10ab6f1f4ba1763ff5fde844c0"}, + {file = "pyOpenSSL-23.0.0.tar.gz", hash = "sha256:c1cc5f86bcacefc84dada7d31175cae1b1518d5f60d3d0bb595a67822a868a6f"}, +] [package.dependencies] cryptography = ">=38.0.0,<40" @@ -850,6 +1672,29 @@ description = "Persistent/Functional/Immutable data structures" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"}, + {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"}, + {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"}, + {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"}, + {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"}, + {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"}, + {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"}, + {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"}, + {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = 
"sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"}, + {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"}, + {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"}, + {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"}, + {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"}, + {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"}, + {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"}, + {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, +] [[package]] name = "pysaml2" @@ -858,6 +1703,10 @@ description = "Python implementation of SAML Version 2 Standard" category = "main" optional = true python-versions = "<4,>=3.6" +files = [ + {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"}, + {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"}, +] [package.dependencies] cryptography = ">=3.1" @@ -881,6 +1730,10 @@ description = "Extensions to the standard Python datetime module" category = "main" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] [package.dependencies] six = ">=1.5" @@ -892,6 +1745,10 @@ description = "World timezone definitions, modern and historical" category = "main" optional = true python-versions = "*" +files = [ + {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, + {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, +] [[package]] name = "pywin32-ctypes" @@ -900,6 +1757,10 @@ description = "" category = "dev" optional = false python-versions = "*" +files = [ + {file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"}, + {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"}, +] [[package]] name = "pyyaml" @@ -908,6 +1769,48 @@ description = "YAML parser and emitter for Python" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = 
"PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, + {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, + {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = 
"PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] [[package]] name = "readme-renderer" @@ -916,6 +1819,10 @@ description = "readme_renderer is a library for rendering \"readme\" description category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"}, + {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"}, +] [package.dependencies] bleach = ">=2.1.0" @@ -932,6 +1839,10 @@ description = "Python HTTP for Humans." 
category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, + {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, +] [package.dependencies] certifi = ">=2017.4.17" @@ -950,6 +1861,10 @@ description = "A utility belt for advanced users of python-requests" category = "dev" optional = false python-versions = "*" +files = [ + {file = "requests-toolbelt-0.9.1.tar.gz", hash = "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0"}, + {file = "requests_toolbelt-0.9.1-py2.py3-none-any.whl", hash = "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f"}, +] [package.dependencies] requests = ">=2.0.1,<3.0.0" @@ -961,6 +1876,10 @@ description = "Validating URI References per RFC 3986" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, + {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, +] [package.extras] idna2008 = ["idna"] @@ -972,6 +1891,10 @@ description = "Render rich text, tables, progress bars, syntax highlighting, mar category = "dev" optional = false python-versions = ">=3.6.3,<4.0.0" +files = [ + {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, + {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, +] [package.dependencies] commonmark = ">=0.9.0,<0.10.0" @@ -988,6 +1911,24 @@ description = "An extremely fast Python linter, written in Rust." 
category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "ruff-0.0.215-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:a4963613bca6ffc448deca1ce3a3fc69af216d6234e5d7f256935d7407088724"}, + {file = "ruff-0.0.215-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:7bcd7b07a88c6530bb4e80850d6cf261081b9d4147eb0ea91fbb85a332ba4fe6"}, + {file = "ruff-0.0.215-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf3fcbf717a1e0c480b3d1fe9fd823043af463f067ec896746dab2123c4dcf10"}, + {file = "ruff-0.0.215-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8199acc4a20d2b3761c4489171f45f37654f2d5ce096361221ea392f078b4be0"}, + {file = "ruff-0.0.215-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f68624344209d07000aba115eeac551f362e278970112f0b69838c70f77f7df"}, + {file = "ruff-0.0.215-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f3c846df8a83445c394e6be58b8e784ec8fc82d67de94f137026c43e6037958b"}, + {file = "ruff-0.0.215-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65b13019821af35a3225a64f2c93877c1e8059b92bb13fce32281ceefeecd199"}, + {file = "ruff-0.0.215-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a02cb67a7171418c5a90ad0d8f983b5fd29b321c9861e0164d126cda4869c61"}, + {file = "ruff-0.0.215-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d69e654d977842c327f26487ef9b7dba39204b113619d33b4139bd3fdd101c"}, + {file = "ruff-0.0.215-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ed2a0e13c822f8f0c40e6fe6172ff9c88add55a1dac9e0c05315618f82375648"}, + {file = "ruff-0.0.215-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1fd4b80bf34e20d18b01bf6d981973975184a85ed39f64934e11d00e2aba882f"}, + {file = "ruff-0.0.215-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5360b476b8720fa76d9dd6ee980c563b930a08524c91c99edddb25364ef656d7"}, + {file = "ruff-0.0.215-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:352534c0c2ffd491b331fe5982b1d3e88a6c2083a3c127466d4eed63918f6ea8"}, + {file = "ruff-0.0.215-py3-none-win32.whl", hash = "sha256:af3cd199a0c6f5b90b9c84a2b9b74b202901194b8b00d5d3e28a0a814037b73f"}, + {file = "ruff-0.0.215-py3-none-win_amd64.whl", hash = "sha256:aa6fe5b56b17a04c8db7f60fef21a9ff96109d10d9232b436ae2dfdc6cc70b7c"}, + {file = "ruff-0.0.215.tar.gz", hash = "sha256:a82ab1452396d5ca389bdcb182e8f273c5f7db854022d7a303764b6218e9e77e"}, +] [[package]] name = "secretstorage" @@ -996,6 +1937,10 @@ description = "Python bindings to FreeDesktop.org Secret Service API" category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"}, + {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, +] [package.dependencies] cryptography = ">=2.0" @@ -1008,6 +1953,10 @@ description = "A library implementing the 'SemVer' scheme." 
category = "main" optional = false python-versions = ">=2.7" +files = [ + {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, + {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, +] [package.extras] dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] @@ -1020,6 +1969,10 @@ description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true python-versions = "*" +files = [ + {file = "sentry-sdk-1.12.1.tar.gz", hash = "sha256:5bbe4b72de22f9ac1e67f2a4e6efe8fbd595bb59b7b223443f50fe5802a5551c"}, + {file = "sentry_sdk-1.12.1-py2.py3-none-any.whl", hash = "sha256:9f0b960694e2d8bb04db4ba6ac2a645040caef4e762c65937998ff06064f10d6"}, +] [package.dependencies] certifi = "*" @@ -1054,6 +2007,10 @@ description = "Service identity verification for pyOpenSSL & cryptography." category = "main" optional = false python-versions = "*" +files = [ + {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, + {file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"}, +] [package.dependencies] attrs = ">=19.1.0" @@ -1075,6 +2032,10 @@ description = "Easily download, build, install, upgrade, and uninstall Python pa category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "setuptools-65.5.1-py3-none-any.whl", hash = "sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31"}, + {file = "setuptools-65.5.1.tar.gz", hash = "sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f"}, +] [package.extras] docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] @@ -1088,6 +2049,10 @@ description = "Setuptools Rust extension plugin" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"}, + {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"}, +] [package.dependencies] semantic-version = ">=2.8.2,<3" @@ -1101,6 +2066,10 @@ description = "Sign JSON with Ed25519 signatures" category = "main" optional = false python-versions = "*" +files = [ + {file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"}, + {file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"}, +] [package.dependencies] canonicaljson = ">=1.0.0" @@ -1119,57 +2088,142 @@ description = "Simple, fast, extensible JSON encoder/decoder for Python" category = "main" optional = false python-versions = ">=2.5, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "six" -version = "1.16.0" -description = "Python 2 and 3 compatibility utilities" -category = "main" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" - -[[package]] -name = "smmap" -version = "5.0.0" -description = "A 
pure Python implementation of a sliding window memory map manager" -category = "dev" -optional = false -python-versions = ">=3.6" - -[[package]] -name = "sortedcontainers" -version = "2.4.0" -description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" -category = "main" -optional = false -python-versions = "*" - -[[package]] -name = "systemd-python" -version = "234" -description = "Python interface for libsystemd" -category = "main" -optional = true -python-versions = "*" - -[[package]] -name = "threadloop" -version = "1.0.2" -description = "Tornado IOLoop Backed Concurrent Futures" -category = "main" -optional = true -python-versions = "*" - -[package.dependencies] -tornado = "*" - -[[package]] +files = [ + {file = "simplejson-3.17.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a89acae02b2975b1f8e4974cb8cdf9bf9f6c91162fb8dec50c259ce700f2770a"}, + {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:82ff356ff91be0ab2293fc6d8d262451eb6ac4fd999244c4b5f863e049ba219c"}, + {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0de783e9c2b87bdd75b57efa2b6260c24b94605b5c9843517577d40ee0c3cc8a"}, + {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:d24a9e61df7a7787b338a58abfba975414937b609eb6b18973e25f573bc0eeeb"}, + {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:e8603e691580487f11306ecb066c76f1f4a8b54fb3bdb23fa40643a059509366"}, + {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9b01e7b00654115965a206e3015f0166674ec1e575198a62a977355597c0bef5"}, + {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:37bc0cf0e5599f36072077e56e248f3336917ded1d33d2688624d8ed3cefd7d2"}, + {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:cf6e7d5fe2aeb54898df18db1baf479863eae581cce05410f61f6b4188c8ada1"}, + {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:bdfc54b4468ed4cd7415928cbe782f4d782722a81aeb0f81e2ddca9932632211"}, + {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd16302d39c4d6f4afde80edd0c97d4db643327d355a312762ccd9bd2ca515ed"}, + {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:deac4bdafa19bbb89edfb73b19f7f69a52d0b5bd3bb0c4ad404c1bbfd7b4b7fd"}, + {file = "simplejson-3.17.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8bbdb166e2fb816e43ab034c865147edafe28e1b19c72433147789ac83e2dda"}, + {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7854326920d41c3b5d468154318fe6ba4390cb2410480976787c640707e0180"}, + {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:04e31fa6ac8e326480703fb6ded1488bfa6f1d3f760d32e29dbf66d0838982ce"}, + {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f63600ec06982cdf480899026f4fda622776f5fabed9a869fdb32d72bc17e99a"}, + {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e03c3b8cc7883a54c3f34a6a135c4a17bc9088a33f36796acdb47162791b02f6"}, + {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a2d30d6c1652140181dc6861f564449ad71a45e4f165a6868c27d36745b65d40"}, + {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a1aa6e4cae8e3b8d5321be4f51c5ce77188faf7baa9fe1e78611f93a8eed2882"}, 
+ {file = "simplejson-3.17.6-cp310-cp310-win32.whl", hash = "sha256:97202f939c3ff341fc3fa84d15db86156b1edc669424ba20b0a1fcd4a796a045"}, + {file = "simplejson-3.17.6-cp310-cp310-win_amd64.whl", hash = "sha256:80d3bc9944be1d73e5b1726c3bbfd2628d3d7fe2880711b1eb90b617b9b8ac70"}, + {file = "simplejson-3.17.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9fa621b3c0c05d965882c920347b6593751b7ab20d8fa81e426f1735ca1a9fc7"}, + {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2fb11922f58df8528adfca123f6a84748ad17d066007e7ac977720063556bd"}, + {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:724c1fe135aa437d5126138d977004d165a3b5e2ee98fc4eb3e7c0ef645e7e27"}, + {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ff4ac6ff3aa8f814ac0f50bf218a2e1a434a17aafad4f0400a57a8cc62ef17f"}, + {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:67093a526e42981fdd954868062e56c9b67fdd7e712616cc3265ad0c210ecb51"}, + {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:5d6b4af7ad7e4ac515bc6e602e7b79e2204e25dbd10ab3aa2beef3c5a9cad2c7"}, + {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:1c9b1ed7ed282b36571638297525f8ef80f34b3e2d600a56f962c6044f24200d"}, + {file = "simplejson-3.17.6-cp36-cp36m-win32.whl", hash = "sha256:632ecbbd2228575e6860c9e49ea3cc5423764d5aa70b92acc4e74096fb434044"}, + {file = "simplejson-3.17.6-cp36-cp36m-win_amd64.whl", hash = "sha256:4c09868ddb86bf79b1feb4e3e7e4a35cd6e61ddb3452b54e20cf296313622566"}, + {file = "simplejson-3.17.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b6bd8144f15a491c662f06814bd8eaa54b17f26095bb775411f39bacaf66837"}, + {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5decdc78849617917c206b01e9fc1d694fd58caa961be816cb37d3150d613d9a"}, + {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:521877c7bd060470806eb6335926e27453d740ac1958eaf0d8c00911bc5e1802"}, + {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:65b998193bd7b0c7ecdfffbc825d808eac66279313cb67d8892bb259c9d91494"}, + {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ac786f6cb7aa10d44e9641c7a7d16d7f6e095b138795cd43503769d4154e0dc2"}, + {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3ff5b3464e1ce86a8de8c88e61d4836927d5595c2162cab22e96ff551b916e81"}, + {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:69bd56b1d257a91e763256d63606937ae4eb890b18a789b66951c00062afec33"}, + {file = "simplejson-3.17.6-cp37-cp37m-win32.whl", hash = "sha256:b81076552d34c27e5149a40187a8f7e2abb2d3185576a317aaf14aeeedad862a"}, + {file = "simplejson-3.17.6-cp37-cp37m-win_amd64.whl", hash = "sha256:07ecaafc1b1501f275bf5acdee34a4ad33c7c24ede287183ea77a02dc071e0c0"}, + {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:068670af975247acbb9fc3d5393293368cda17026db467bf7a51548ee8f17ee1"}, + {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4d1c135af0c72cb28dd259cf7ba218338f4dc027061262e46fe058b4e6a4c6a3"}, + {file = "simplejson-3.17.6-cp38-cp38-macosx_11_0_arm64.whl", 
hash = "sha256:23fe704da910ff45e72543cbba152821685a889cf00fc58d5c8ee96a9bad5f94"}, + {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f444762fed1bc1fd75187ef14a20ed900c1fbb245d45be9e834b822a0223bc81"}, + {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:681eb4d37c9a9a6eb9b3245a5e89d7f7b2b9895590bb08a20aa598c1eb0a1d9d"}, + {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8e8607d8f6b4f9d46fee11447e334d6ab50e993dd4dbfb22f674616ce20907ab"}, + {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b10556817f09d46d420edd982dd0653940b90151d0576f09143a8e773459f6fe"}, + {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e1ec8a9ee0987d4524ffd6299e778c16cc35fef6d1a2764e609f90962f0b293a"}, + {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b4126cac7d69ac06ff22efd3e0b3328a4a70624fcd6bca4fc1b4e6d9e2e12bf"}, + {file = "simplejson-3.17.6-cp38-cp38-win32.whl", hash = "sha256:35a49ebef25f1ebdef54262e54ae80904d8692367a9f208cdfbc38dbf649e00a"}, + {file = "simplejson-3.17.6-cp38-cp38-win_amd64.whl", hash = "sha256:743cd768affaa508a21499f4858c5b824ffa2e1394ed94eb85caf47ac0732198"}, + {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb62d517a516128bacf08cb6a86ecd39fb06d08e7c4980251f5d5601d29989ba"}, + {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:12133863178a8080a3dccbf5cb2edfab0001bc41e5d6d2446af2a1131105adfe"}, + {file = "simplejson-3.17.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5540fba2d437edaf4aa4fbb80f43f42a8334206ad1ad3b27aef577fd989f20d9"}, + {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d74ee72b5071818a1a5dab47338e87f08a738cb938a3b0653b9e4d959ddd1fd9"}, + {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28221620f4dcabdeac310846629b976e599a13f59abb21616356a85231ebd6ad"}, + {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b09bc62e5193e31d7f9876220fb429ec13a6a181a24d897b9edfbbdbcd678851"}, + {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7255a37ff50593c9b2f1afa8fafd6ef5763213c1ed5a9e2c6f5b9cc925ab979f"}, + {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:401d40969cee3df7bda211e57b903a534561b77a7ade0dd622a8d1a31eaa8ba7"}, + {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a649d0f66029c7eb67042b15374bd93a26aae202591d9afd71e111dd0006b198"}, + {file = "simplejson-3.17.6-cp39-cp39-win32.whl", hash = "sha256:522fad7be85de57430d6d287c4b635813932946ebf41b913fe7e880d154ade2e"}, + {file = "simplejson-3.17.6-cp39-cp39-win_amd64.whl", hash = "sha256:3fe87570168b2ae018391e2b43fbf66e8593a86feccb4b0500d134c998983ccc"}, + {file = "simplejson-3.17.6.tar.gz", hash = "sha256:cf98038d2abf63a1ada5730e91e84c642ba6c225b0198c3684151b1f80c5f8a6"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = 
"sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.0" +description = "A pure Python implementation of a sliding window memory map manager" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, + {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + +[[package]] +name = "systemd-python" +version = "234" +description = "Python interface for libsystemd" +category = "main" +optional = true +python-versions = "*" +files = [ + {file = "systemd-python-234.tar.gz", hash = "sha256:fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7"}, +] + +[[package]] +name = "threadloop" +version = "1.0.2" +description = "Tornado IOLoop Backed Concurrent Futures" +category = "main" +optional = true +python-versions = "*" +files = [ + {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, + {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, +] + +[package.dependencies] +tornado = "*" + +[[package]] name = "thrift" version = "0.15.0" description = "Python bindings for the Apache Thrift RPC system" category = "main" optional = true python-versions = "*" +files = [ + {file = "thrift-0.15.0.tar.gz", hash = "sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29"}, +] [package.dependencies] six = ">=1.7.2" @@ -1186,6 +2240,10 @@ description = "A lil' TOML parser" category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, + {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, +] [[package]] name = "tornado" @@ -1194,6 +2252,49 @@ description = "Tornado is a Python web framework and asynchronous networking lib category = "main" optional = true python-versions = ">= 3.5" +files = [ + {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"}, + {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"}, + {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"}, + {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"}, + {file = "tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"}, + {file = 
"tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"}, + {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"}, + {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"}, + {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"}, + {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"}, + {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"}, + {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"}, + {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"}, + {file = "tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"}, + {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"}, + {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"}, + {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"}, + {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"}, + {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"}, + {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"}, + {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"}, + {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"}, + {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"}, + {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"}, + {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"}, + {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"}, + {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"}, + {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"}, + {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"}, + {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"}, + {file = "tornado-6.1-cp38-cp38-win32.whl", hash = 
"sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"}, + {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"}, + {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"}, + {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"}, + {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"}, + {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"}, + {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"}, + {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"}, + {file = "tornado-6.1-cp39-cp39-win32.whl", hash = "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"}, + {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"}, + {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, +] [[package]] name = "towncrier" @@ -1202,6 +2303,10 @@ description = "Building newsfiles for your project." category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "towncrier-22.12.0-py3-none-any.whl", hash = "sha256:9767a899a4d6856950f3598acd9e8f08da2663c49fdcda5ea0f9e6ba2afc8eea"}, + {file = "towncrier-22.12.0.tar.gz", hash = "sha256:9c49d7e75f646a9aea02ae904c0bc1639c8fd14a01292d2b123b8d307564034d"}, +] [package.dependencies] click = "*" @@ -1221,6 +2326,10 @@ description = "High-level Twisted HTTP Client API" category = "main" optional = false python-versions = ">=3.6" +files = [ + {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"}, + {file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"}, +] [package.dependencies] attrs = "*" @@ -1240,6 +2349,10 @@ description = "Collection of utilities for publishing packages on PyPI" category = "dev" optional = false python-versions = ">=3.7" +files = [ + {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"}, + {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"}, +] [package.dependencies] importlib-metadata = ">=3.6" @@ -1259,6 +2372,10 @@ description = "An asynchronous networking framework written in Python" category = "main" optional = false python-versions = ">=3.7.1" +files = [ + {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"}, + {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"}, +] [package.dependencies] attrs = ">=19.2.0" @@ -1297,6 +2414,20 @@ description = "An extension for use in the twisted I/O Completion Ports reactor. 
category = "main" optional = false python-versions = "*" +files = [ + {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"}, + {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win32.whl", hash = "sha256:985c06a33f5c0dae92c71a036d1ea63872ee86a21dd9b01e1f287486f15524b4"}, + {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:81b3abe3527b367da0220482820cb12a16c661672b7bcfcde328902890d63323"}, + {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win32.whl", hash = "sha256:9dbb8823b49f06d4de52721b47de4d3b3026064ef4788ce62b1a21c57c3fff6f"}, + {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b9fed67cf0f951573f06d560ac2f10f2a4bbdc6697770113a2fc396ea2cb2565"}, + {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:b76b4eed9b27fd63ddb0877efdd2d15835fdcb6baa745cb85b66e5d016ac2878"}, + {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win_amd64.whl", hash = "sha256:851b3735ca7e8102e661872390e3bce88f8901bece95c25a0c8bb9ecb8a23d32"}, + {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win32.whl", hash = "sha256:bf4133139d77fc706d8f572e6b7d82871d82ec7ef25d685c2351bdacfb701415"}, + {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:306becd6e22ab6e8e4f36b6bdafd9c92e867c98a5ce517b27fdd27760ee7ae41"}, + {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win32.whl", hash = "sha256:3c61742cb0bc6c1ac117a7e5f422c129832f0c295af49e01d8a6066df8cfc04d"}, + {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b435857b9efcbfc12f8c326ef0383f26416272260455bbca2cd8d8eca470c546"}, + {file = "twisted_iocpsupport-1.0.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7d972cfa8439bdcb35a7be78b7ef86d73b34b808c74be56dfa785c8a93b851bf"}, +] [[package]] name = "txredisapi" @@ -1305,6 +2436,10 @@ description = "non-blocking redis client for python" category = "main" optional = true python-versions = "*" +files = [ + {file = "txredisapi-1.4.7-py3-none-any.whl", hash = "sha256:34c9eba8d34f452d30661f073b67b8cd42b695e3d31678ec1bbf628a65a0f059"}, + {file = "txredisapi-1.4.7.tar.gz", hash = "sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb"}, +] [package.dependencies] six = "*" @@ -1317,6 +2452,32 @@ description = "a fork of Python 2 and 3 ast modules with type comment support" category = "dev" optional = false python-versions = ">=3.6" +files = [ + {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"}, + {file = "typed_ast-1.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:676d051b1da67a852c0447621fdd11c4e104827417bf216092ec3e286f7da596"}, + {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc2542e83ac8399752bc16e0b35e038bdb659ba237f4222616b4e83fb9654985"}, + {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74cac86cc586db8dfda0ce65d8bcd2bf17b58668dfcc3652762f3ef0e6677e76"}, + {file = "typed_ast-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:18fe320f354d6f9ad3147859b6e16649a0781425268c4dde596093177660e71a"}, + {file = "typed_ast-1.5.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:31d8c6b2df19a777bc8826770b872a45a1f30cfefcfd729491baa5237faae837"}, + {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:963a0ccc9a4188524e6e6d39b12c9ca24cc2d45a71cfdd04a26d883c922b4b78"}, + {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0eb77764ea470f14fcbb89d51bc6bbf5e7623446ac4ed06cbd9ca9495b62e36e"}, + {file = "typed_ast-1.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:294a6903a4d087db805a7656989f613371915fc45c8cc0ddc5c5a0a8ad9bea4d"}, + {file = "typed_ast-1.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:26a432dc219c6b6f38be20a958cbe1abffcc5492821d7e27f08606ef99e0dffd"}, + {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7407cfcad702f0b6c0e0f3e7ab876cd1d2c13b14ce770e412c0c4b9728a0f88"}, + {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f30ddd110634c2d7534b2d4e0e22967e88366b0d356b24de87419cc4410c41b7"}, + {file = "typed_ast-1.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8c08d6625bb258179b6e512f55ad20f9dfef019bbfbe3095247401e053a3ea30"}, + {file = "typed_ast-1.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:90904d889ab8e81a956f2c0935a523cc4e077c7847a836abee832f868d5c26a4"}, + {file = "typed_ast-1.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bbebc31bf11762b63bf61aaae232becb41c5bf6b3461b80a4df7e791fabb3aca"}, + {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29dd9a3a9d259c9fa19d19738d021632d673f6ed9b35a739f48e5f807f264fb"}, + {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:58ae097a325e9bb7a684572d20eb3e1809802c5c9ec7108e85da1eb6c1a3331b"}, + {file = "typed_ast-1.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:da0a98d458010bf4fe535f2d1e367a2e2060e105978873c04c04212fb20543f7"}, + {file = "typed_ast-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33b4a19ddc9fc551ebabca9765d54d04600c4a50eda13893dadf67ed81d9a098"}, + {file = "typed_ast-1.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1098df9a0592dd4c8c0ccfc2e98931278a6c6c53cb3a3e2cf7e9ee3b06153344"}, + {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c47c3b43fe3a39ddf8de1d40dbbfca60ac8530a36c9b198ea5b9efac75c09e"}, + {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f290617f74a610849bd8f5514e34ae3d09eafd521dceaa6cf68b3f4414266d4e"}, + {file = "typed_ast-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:df05aa5b241e2e8045f5f4367a9f6187b09c4cdf8578bb219861c4e27c443db5"}, + {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"}, +] [[package]] name = "types-bleach" @@ -1325,6 +2486,10 @@ description = "Typing stubs for bleach" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"}, + {file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"}, +] [[package]] name = "types-commonmark" @@ -1333,6 +2498,10 @@ description = "Typing stubs for commonmark" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"}, + {file = 
"types_commonmark-0.9.2-py3-none-any.whl", hash = "sha256:56f20199a1f9a2924443211a0ef97f8b15a8a956a7f4e9186be6950bf38d6d02"}, +] [[package]] name = "types-cryptography" @@ -1341,6 +2510,10 @@ description = "Typing stubs for cryptography" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"}, + {file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"}, +] [package.dependencies] types-enum34 = "*" @@ -1353,6 +2526,10 @@ description = "Typing stubs for docutils" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-docutils-0.19.1.1.tar.gz", hash = "sha256:be0a51ba1c7dd215d9d2df66d6845e63c1009b4bbf4c5beb87a0d9745cdba962"}, + {file = "types_docutils-0.19.1.1-py3-none-any.whl", hash = "sha256:a024cada35f0c13cc45eb0b68a102719018a634013690b7fef723bcbfadbd1f1"}, +] [[package]] name = "types-enum34" @@ -1361,6 +2538,10 @@ description = "Typing stubs for enum34" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"}, + {file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"}, +] [[package]] name = "types-ipaddress" @@ -1369,6 +2550,10 @@ description = "Typing stubs for ipaddress" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"}, + {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, +] [[package]] name = "types-jsonschema" @@ -1377,6 +2562,10 @@ description = "Typing stubs for jsonschema" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-jsonschema-4.17.0.2.tar.gz", hash = "sha256:8b9e1140d4d780f0f19b5cab1b8a3732e8dd5e49dbc1f174cc0b499125ca6f6c"}, + {file = "types_jsonschema-4.17.0.2-py3-none-any.whl", hash = "sha256:8fd2f9aea4da54f9a811baa6963aac10fd680c18baa6237392c079b97d152738"}, +] [[package]] name = "types-opentracing" @@ -1385,6 +2574,10 @@ description = "Typing stubs for opentracing" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"}, + {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"}, +] [[package]] name = "types-pillow" @@ -1393,6 +2586,10 @@ description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-Pillow-9.4.0.0.tar.gz", hash = "sha256:ef8a823638ceb765a144a98a2f816b8912da0337c5c2556d33774f1434f9918c"}, + {file = "types_Pillow-9.4.0.0-py3-none-any.whl", hash = "sha256:246f0dc52d575ef64e01f06f41be37a492b542ee3180638a7b874a6dd4d48c01"}, +] [[package]] name = "types-psycopg2" @@ -1401,6 +2598,10 @@ description = "Typing stubs for psycopg2" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"}, + {file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = 
"sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"}, +] [[package]] name = "types-pyopenssl" @@ -1409,6 +2610,10 @@ description = "Typing stubs for pyOpenSSL" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"}, + {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"}, +] [package.dependencies] types-cryptography = "*" @@ -1420,14 +2625,22 @@ description = "Typing stubs for PyYAML" category = "dev" optional = false python-versions = "*" - -[[package]] +files = [ + {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"}, + {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"}, +] + +[[package]] name = "types-requests" version = "2.28.11.7" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-requests-2.28.11.7.tar.gz", hash = "sha256:0ae38633734990d019b80f5463dfa164ebd3581998ac8435f526da6fe4d598c3"}, + {file = "types_requests-2.28.11.7-py3-none-any.whl", hash = "sha256:b6a2fca8109f4fdba33052f11ed86102bddb2338519e1827387137fefc66a98b"}, +] [package.dependencies] types-urllib3 = "<1.27" @@ -1439,6 +2652,10 @@ description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-setuptools-65.6.0.3.tar.gz", hash = "sha256:7ddd7415282fa97ab18e490206067c0cdb126b103743e72ee86783d7af6481c5"}, + {file = "types_setuptools-65.6.0.3-py3-none-any.whl", hash = "sha256:ad729fc3a9a3946f73915eaab16ce56b30ed5ae998479253d809d76b3889ee09"}, +] [package.dependencies] types-docutils = "*" @@ -1450,6 +2667,10 @@ description = "Typing stubs for urllib3" category = "dev" optional = false python-versions = "*" +files = [ + {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"}, + {file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"}, +] [[package]] name = "typing-extensions" @@ -1458,6 +2679,10 @@ description = "Backported and Experimental Type Hints for Python 3.7+" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, + {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, +] [[package]] name = "unpaddedbase64" @@ -1466,6 +2691,10 @@ description = "Encode and decode Base64 without \"=\" padding" category = "main" optional = false python-versions = ">=3.6,<4.0" +files = [ + {file = "unpaddedbase64-2.1.0-py3-none-any.whl", hash = "sha256:485eff129c30175d2cd6f0cd8d2310dff51e666f7f36175f738d75dfdbd0b1c6"}, + {file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"}, +] [[package]] name = "urllib3" @@ -1474,6 +2703,10 @@ description = "HTTP library with thread-safe connection pooling, file post, and category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4" +files = [ + {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = 
"sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"}, + {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"}, +] [package.extras] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] @@ -1487,6 +2720,10 @@ description = "Character encoding aliases for legacy web content" category = "main" optional = false python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] [[package]] name = "wrapt" @@ -1495,6 +2732,72 @@ description = "Module for decorators, wrappers and monkey patching." category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, + {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, + {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, + {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, + {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, + {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, + {file = 
"wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, + {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, + {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, + {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, + {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, + {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, + {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, + {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, + {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, + {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, + {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, + {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, + {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, + {file = 
"wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, + {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, + {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, + {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, + {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, + {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, + {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, + {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, + {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, + {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, + {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, + {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, + {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, + {file = "wrapt-1.14.1.tar.gz", hash = 
"sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, +] [[package]] name = "xmlschema" @@ -1503,6 +2806,10 @@ description = "An XML Schema validator and decoder" category = "main" optional = true python-versions = ">=3.7" +files = [ + {file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"}, + {file = "xmlschema-1.10.0.tar.gz", hash = "sha256:be1eedce6a4b911fd3a7f4060d0811951820a13410e61f0454b30e9f4e7cf197"}, +] [package.dependencies] elementpath = ">=2.5.0,<3.0.0" @@ -1519,6 +2826,10 @@ description = "Backport of pathlib-compatible object wrapper for zip files" category = "main" optional = false python-versions = ">=3.7" +files = [ + {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, + {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, +] [package.extras] docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"] @@ -1531,6 +2842,10 @@ description = "Very basic event publishing system" category = "dev" optional = false python-versions = "*" +files = [ + {file = "zope.event-4.5.0-py2.py3-none-any.whl", hash = "sha256:2666401939cdaa5f4e0c08cf7f20c9b21423b95e88f4675b1443973bdb080c42"}, + {file = "zope.event-4.5.0.tar.gz", hash = "sha256:5e76517f5b9b119acf37ca8819781db6c16ea433f7e2062c4afc2b6fbedb1330"}, +] [package.dependencies] setuptools = "*" @@ -1546,6 +2861,59 @@ description = "Interfaces for Python" category = "main" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "zope.interface-5.4.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:7df1e1c05304f26faa49fa752a8c690126cf98b40b91d54e6e9cc3b7d6ffe8b7"}, + {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2c98384b254b37ce50eddd55db8d381a5c53b4c10ee66e1e7fe749824f894021"}, + {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:08f9636e99a9d5410181ba0729e0408d3d8748026ea938f3b970a0249daa8192"}, + {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0ea1d73b7c9dcbc5080bb8aaffb776f1c68e807767069b9ccdd06f27a161914a"}, + {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:273f158fabc5ea33cbc936da0ab3d4ba80ede5351babc4f577d768e057651531"}, + {file = "zope.interface-5.4.0-cp27-cp27m-win32.whl", hash = "sha256:a1e6e96217a0f72e2b8629e271e1b280c6fa3fe6e59fa8f6701bec14e3354325"}, + {file = "zope.interface-5.4.0-cp27-cp27m-win_amd64.whl", hash = "sha256:877473e675fdcc113c138813a5dd440da0769a2d81f4d86614e5d62b69497155"}, + {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f7ee479e96f7ee350db1cf24afa5685a5899e2b34992fb99e1f7c1b0b758d263"}, + {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:b0297b1e05fd128d26cc2460c810d42e205d16d76799526dfa8c8ccd50e74959"}, + {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:af310ec8335016b5e52cae60cda4a4f2a60a788cbb949a4fbea13d441aa5a09e"}, + {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9a9845c4c6bb56e508651f005c4aeb0404e518c6f000d5a1123ab077ab769f5c"}, + {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0b465ae0962d49c68aa9733ba92a001b2a0933c317780435f00be7ecb959c702"}, + {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = 
"sha256:5dd9ca406499444f4c8299f803d4a14edf7890ecc595c8b1c7115c2342cadc5f"}, + {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:469e2407e0fe9880ac690a3666f03eb4c3c444411a5a5fddfdabc5d184a79f05"}, + {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:52de7fc6c21b419078008f697fd4103dbc763288b1406b4562554bd47514c004"}, + {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:3dd4952748521205697bc2802e4afac5ed4b02909bb799ba1fe239f77fd4e117"}, + {file = "zope.interface-5.4.0-cp35-cp35m-win32.whl", hash = "sha256:dd93ea5c0c7f3e25335ab7d22a507b1dc43976e1345508f845efc573d3d779d8"}, + {file = "zope.interface-5.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:3748fac0d0f6a304e674955ab1365d515993b3a0a865e16a11ec9d86fb307f63"}, + {file = "zope.interface-5.4.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:66c0061c91b3b9cf542131148ef7ecbecb2690d48d1612ec386de9d36766058f"}, + {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:d0c1bc2fa9a7285719e5678584f6b92572a5b639d0e471bb8d4b650a1a910920"}, + {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2876246527c91e101184f63ccd1d716ec9c46519cc5f3d5375a3351c46467c46"}, + {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:334701327f37c47fa628fc8b8d28c7d7730ce7daaf4bda1efb741679c2b087fc"}, + {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:71aace0c42d53abe6fc7f726c5d3b60d90f3c5c055a447950ad6ea9cec2e37d9"}, + {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5bb3489b4558e49ad2c5118137cfeaf59434f9737fa9c5deefc72d22c23822e2"}, + {file = "zope.interface-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:1c0e316c9add0db48a5b703833881351444398b04111188069a26a61cfb4df78"}, + {file = "zope.interface-5.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:6f0c02cbb9691b7c91d5009108f975f8ffeab5dff8f26d62e21c493060eff2a1"}, + {file = "zope.interface-5.4.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:7d97a4306898b05404a0dcdc32d9709b7d8832c0c542b861d9a826301719794e"}, + {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:867a5ad16892bf20e6c4ea2aab1971f45645ff3102ad29bd84c86027fa99997b"}, + {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5f931a1c21dfa7a9c573ec1f50a31135ccce84e32507c54e1ea404894c5eb96f"}, + {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:194d0bcb1374ac3e1e023961610dc8f2c78a0f5f634d0c737691e215569e640d"}, + {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:8270252effc60b9642b423189a2fe90eb6b59e87cbee54549db3f5562ff8d1b8"}, + {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:15e7d1f7a6ee16572e21e3576d2012b2778cbacf75eb4b7400be37455f5ca8bf"}, + {file = "zope.interface-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:8892f89999ffd992208754851e5a052f6b5db70a1e3f7d54b17c5211e37a98c7"}, + {file = "zope.interface-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2e5a26f16503be6c826abca904e45f1a44ff275fdb7e9d1b75c10671c26f8b94"}, + {file = "zope.interface-5.4.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:0f91b5b948686659a8e28b728ff5e74b1be6bf40cb04704453617e5f1e945ef3"}, + {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:4de4bc9b6d35c5af65b454d3e9bc98c50eb3960d5a3762c9438df57427134b8e"}, + {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_x86_64.whl", 
hash = "sha256:bf68f4b2b6683e52bec69273562df15af352e5ed25d1b6641e7efddc5951d1a7"}, + {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:63b82bb63de7c821428d513607e84c6d97d58afd1fe2eb645030bdc185440120"}, + {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:db1fa631737dab9fa0b37f3979d8d2631e348c3b4e8325d6873c2541d0ae5a48"}, + {file = "zope.interface-5.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f44e517131a98f7a76696a7b21b164bcb85291cee106a23beccce454e1f433a4"}, + {file = "zope.interface-5.4.0-cp38-cp38-win32.whl", hash = "sha256:a9506a7e80bcf6eacfff7f804c0ad5350c8c95b9010e4356a4b36f5322f09abb"}, + {file = "zope.interface-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:3c02411a3b62668200910090a0dff17c0b25aaa36145082a5a6adf08fa281e54"}, + {file = "zope.interface-5.4.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0cee5187b60ed26d56eb2960136288ce91bcf61e2a9405660d271d1f122a69a4"}, + {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:a8156e6a7f5e2a0ff0c5b21d6bcb45145efece1909efcbbbf48c56f8da68221d"}, + {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:205e40ccde0f37496904572035deea747390a8b7dc65146d30b96e2dd1359a83"}, + {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:3f24df7124c323fceb53ff6168da70dbfbae1442b4f3da439cd441681f54fe25"}, + {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:5208ebd5152e040640518a77827bdfcc73773a15a33d6644015b763b9c9febc1"}, + {file = "zope.interface-5.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:17776ecd3a1fdd2b2cd5373e5ef8b307162f581c693575ec62e7c5399d80794c"}, + {file = "zope.interface-5.4.0-cp39-cp39-win32.whl", hash = "sha256:d4d9d6c1a455d4babd320203b918ccc7fcbefe308615c521062bc2ba1aa4d26e"}, + {file = "zope.interface-5.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:0cba8477e300d64a11a9789ed40ee8932b59f9ee05f85276dbb4b59acee5dd09"}, + {file = "zope.interface-5.4.0.tar.gz", hash = "sha256:5dba5f530fec3f0988d83b78cc591b58c0b6eb8431a85edd1569a0539a8a5a0e"}, +] [package.dependencies] setuptools = "*" @@ -1562,6 +2930,10 @@ description = "zope.interface extension for defining data schemas" category = "dev" optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "zope.schema-6.2.0-py2.py3-none-any.whl", hash = "sha256:03150d8670549590b45109e06b7b964f4e751fa9cb5297ec4985c3bc38641b07"}, + {file = "zope.schema-6.2.0.tar.gz", hash = "sha256:2201aef8ad75ee5a881284d7a6acd384661d6dca7bde5e80a22839a77124595b"}, +] [package.dependencies] setuptools = "*" @@ -1589,1371 +2961,6 @@ url-preview = ["lxml"] user-search = ["pyicu"] [metadata] -lock-version = "1.1" +lock-version = "2.0" python-versions = "^3.7.1" content-hash = "53867af07a507c3addd614c828dfb26175f6604398848e84c0ea65980f8a59a2" - -[metadata.files] -attrs = [ - {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, - {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, -] -authlib = [ - {file = "Authlib-1.2.0-py2.py3-none-any.whl", hash = "sha256:4ddf4fd6cfa75c9a460b361d4bd9dac71ffda0be879dbe4292a02e92349ad55a"}, - {file = "Authlib-1.2.0.tar.gz", hash = "sha256:4fa3e80883a5915ef9f5bc28630564bc4ed5b5af39812a3ff130ec76bd631e9d"}, -] -automat = [ - {file = "Automat-22.10.0-py2.py3-none-any.whl", hash = 
"sha256:c3164f8742b9dc440f3682482d32aaff7bb53f71740dd018533f9de286b64180"}, - {file = "Automat-22.10.0.tar.gz", hash = "sha256:e56beb84edad19dcc11d30e8d9b895f75deeb5ef5e96b84a467066b3b84bb04e"}, -] -bcrypt = [ - {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"}, - {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"}, - {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"}, - {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"}, - {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"}, - {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"}, - {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"}, - {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"}, - {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"}, - {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"}, - {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"}, - {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"}, - {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"}, - {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"}, - {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"}, - {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, -] -black = [ - {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, - {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, - {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, - {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, - {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, - {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, - {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, - {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, - {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, - {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, - {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, - {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, -] -bleach = [ - {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"}, - {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"}, -] -canonicaljson = [ - {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"}, - {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"}, -] -certifi = [ - {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, - {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, -] -cffi = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = 
"cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, -] -charset-normalizer = [ - {file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, - {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, -] -click = [ - {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, - {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, -] -click-default-group = [ - {file = "click-default-group-1.2.2.tar.gz", hash = "sha256:d9560e8e8dfa44b3562fbc9425042a0fd6d21956fcc2db0077f63f34253ab904"}, -] -colorama = [ - {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, - {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, -] -commonmark = [ - {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, - {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, -] -constantly = [ - {file = "constantly-15.1.0-py2.py3-none-any.whl", hash = "sha256:dd2fa9d6b1a51a83f0d7dd76293d734046aa176e384bf6e33b7e44880eb37c5d"}, - {file = "constantly-15.1.0.tar.gz", hash = "sha256:586372eb92059873e29eba4f9dec8381541b4d3834660707faf8ba59146dfc35"}, -] -cryptography = [ - {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"}, - {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = 
"sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"}, - {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"}, - {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"}, - {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"}, - {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = 
"sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"}, - {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"}, -] -defusedxml = [ - {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, - {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, -] -deprecated = [ - {file = "Deprecated-1.2.13-py2.py3-none-any.whl", hash = "sha256:64756e3e14c8c5eea9795d93c524551432a0be75629f8f29e67ab8caf076c76d"}, - {file = "Deprecated-1.2.13.tar.gz", hash = "sha256:43ac5335da90c31c24ba028af536a91d41d53f9e6901ddb021bcc572ce44e38d"}, -] -docutils = [ - {file = "docutils-0.18.1-py2.py3-none-any.whl", hash = "sha256:23010f129180089fbcd3bc08cfefccb3b890b0050e1ca00c867036e9d161b98c"}, - {file = "docutils-0.18.1.tar.gz", hash = "sha256:679987caf361a7539d76e584cbeddc311e3aee937877c87346f31debc63e9d06"}, -] -elementpath = [ - {file = "elementpath-2.5.0-py3-none-any.whl", hash = "sha256:2a432775e37a19e4362443078130a7dbfc457d7d093cd421c03958d9034cc08b"}, - {file = "elementpath-2.5.0.tar.gz", hash = "sha256:3a27aaf3399929fccda013899cb76d3ff111734abf4281e5f9d3721ba0b9ffa3"}, -] -frozendict = [ - {file = "frozendict-2.3.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4a3b32d47282ae0098b9239a6d53ec539da720258bd762d62191b46f2f87c5fc"}, - {file = "frozendict-2.3.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c9887179a245a66a50f52afa08d4d92ae0f269839fab82285c70a0fa0dd782"}, - {file = "frozendict-2.3.4-cp310-cp310-win_amd64.whl", hash = "sha256:b98a0d65a59af6da03f794f90b0c3085a7ee14e7bf8f0ef36b079ee8aa992439"}, - {file = "frozendict-2.3.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:3d8042b7dab5e992e30889c9b71b781d5feef19b372d47d735e4d7d45846fd4a"}, - {file = "frozendict-2.3.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a6d2e8b7cf6b6e5677a1a4b53b4073e5d9ec640d1db30dc679627668d25e90"}, - {file = "frozendict-2.3.4-cp36-cp36m-win_amd64.whl", hash = "sha256:dbbe1339ac2646523e0bb00d1896085d1f70de23780e4927ca82b36ab8a044d3"}, - {file = "frozendict-2.3.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95bac22f7f09d81f378f2b3f672b7a50a974ca180feae1507f5e21bc147e8bc8"}, - {file = "frozendict-2.3.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dae686722c144b333c4dbdc16323a5de11406d26b76d2be1cc175f90afacb5ba"}, - {file = "frozendict-2.3.4-cp37-cp37m-win_amd64.whl", hash = "sha256:389f395a74eb16992217ac1521e689c1dea2d70113bcb18714669ace1ed623b9"}, - {file = "frozendict-2.3.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ccb6450a416c9cc9acef7683e637e28356e3ceeabf83521f74cc2718883076b7"}, - {file = "frozendict-2.3.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca59108b77cadc13ba7dfea7e8f50811208c7652a13dc6c7f92d7782a24d299"}, - {file = "frozendict-2.3.4-cp38-cp38-win_amd64.whl", hash = "sha256:3ec86ebf143dd685184215c27ec416c36e0ba1b80d81b1b9482f7d380c049b4e"}, - {file = "frozendict-2.3.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5809e6ff6b7257043a486f7a3b73a7da71cf69a38980b4171e4741291d0d9eb3"}, - {file = "frozendict-2.3.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:7c550ed7fdf1962984bec21630c584d722b3ee5d5f57a0ae2527a0121dc0414a"}, - {file = "frozendict-2.3.4-cp39-cp39-win_amd64.whl", hash = "sha256:3e93aebc6e69a8ef329bbe9afb8342bd33c7b5c7a0c480cb9f7e60b0cbe48072"}, - {file = "frozendict-2.3.4-py3-none-any.whl", hash = "sha256:d722f3d89db6ae35ef35ecc243c40c800eb344848c83dba4798353312cd37b15"}, - {file = "frozendict-2.3.4.tar.gz", hash = "sha256:15b4b18346259392b0d27598f240e9390fafbff882137a9c48a1e0104fb17f78"}, -] -gitdb = [ - {file = "gitdb-4.0.9-py3-none-any.whl", hash = "sha256:8033ad4e853066ba6ca92050b9df2f89301b8fc8bf7e9324d412a63f8bf1a8fd"}, - {file = "gitdb-4.0.9.tar.gz", hash = "sha256:bac2fd45c0a1c9cf619e63a90d62bdc63892ef92387424b855792a6cabe789aa"}, -] -gitpython = [ - {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"}, - {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"}, -] -hiredis = [ - {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:3d55e36715ff06cdc0ab62f9591607c4324297b6b6ce5b58cb9928b3defe30ea"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:5d2a48c80cf5a338d58aae3c16872f4d452345e18350143b3bf7216d33ba7b99"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:240ce6dc19835971f38caf94b5738092cb1e641f8150a9ef9251b7825506cb05"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5dc7a94bb11096bc4bffd41a3c4f2b958257085c01522aa81140c68b8bf1630a"}, - {file = "hiredis-2.0.0-cp36-cp36m-win32.whl", hash = "sha256:139705ce59d94eef2ceae9fd2ad58710b02aee91e7fa0ccb485665ca0ecbec63"}, - {file = "hiredis-2.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c39c46d9e44447181cd502a35aad2bb178dbf1b1f86cf4db639d7b9614f837c6"}, - {file = "hiredis-2.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:adf4dd19d8875ac147bf926c727215a0faf21490b22c053db464e0bf0deb0485"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0f41827028901814c709e744060843c77e78a3aca1e0d6875d2562372fcb405a"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:508999bec4422e646b05c95c598b64bdbef1edf0d2b715450a078ba21b385bcc"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:0d5109337e1db373a892fdcf78eb145ffb6bbd66bb51989ec36117b9f7f9b579"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:04026461eae67fdefa1949b7332e488224eac9e8f2b5c58c98b54d29af22093e"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a00514362df15af041cc06e97aebabf2895e0a7c42c83c21894be12b84402d79"}, - {file = "hiredis-2.0.0-cp37-cp37m-win32.whl", hash = "sha256:09004096e953d7ebd508cded79f6b21e05dff5d7361771f59269425108e703bc"}, - {file = "hiredis-2.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f8196f739092a78e4f6b1b2172679ed3343c39c61a3e9d722ce6fcf1dac2824a"}, - {file = "hiredis-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:294a6697dfa41a8cba4c365dd3715abc54d29a86a40ec6405d677ca853307cfb"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3dddf681284fe16d047d3ad37415b2e9ccdc6c8986c8062dbe51ab9a358b50a5"}, - {file 
= "hiredis-2.0.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:dcef843f8de4e2ff5e35e96ec2a4abbdf403bd0f732ead127bd27e51f38ac298"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:87c7c10d186f1743a8fd6a971ab6525d60abd5d5d200f31e073cd5e94d7e7a9d"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:7f0055f1809b911ab347a25d786deff5e10e9cf083c3c3fd2dd04e8612e8d9db"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11d119507bb54e81f375e638225a2c057dda748f2b1deef05c2b1a5d42686048"}, - {file = "hiredis-2.0.0-cp38-cp38-win32.whl", hash = "sha256:7492af15f71f75ee93d2a618ca53fea8be85e7b625e323315169977fae752426"}, - {file = "hiredis-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:65d653df249a2f95673976e4e9dd7ce10de61cfc6e64fa7eeaa6891a9559c581"}, - {file = "hiredis-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8427a5e9062ba66fc2c62fb19a72276cf12c780e8db2b0956ea909c48acff5"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:3f5f7e3a4ab824e3de1e1700f05ad76ee465f5f11f5db61c4b297ec29e692b2e"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e3447d9e074abf0e3cd85aef8131e01ab93f9f0e86654db7ac8a3f73c63706ce"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:8b42c0dc927b8d7c0eb59f97e6e34408e53bc489f9f90e66e568f329bff3e443"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:b84f29971f0ad4adaee391c6364e6f780d5aae7e9226d41964b26b49376071d0"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:0b39ec237459922c6544d071cdcf92cbb5bc6685a30e7c6d985d8a3e3a75326e"}, - {file = "hiredis-2.0.0-cp39-cp39-win32.whl", hash = "sha256:a7928283143a401e72a4fad43ecc85b35c27ae699cf5d54d39e1e72d97460e1d"}, - {file = "hiredis-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:a4ee8000454ad4486fb9f28b0cab7fa1cd796fc36d639882d0b34109b5b3aec9"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1f03d4dadd595f7a69a75709bc81902673fa31964c75f93af74feac2f134cc54"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:04927a4c651a0e9ec11c68e4427d917e44ff101f761cd3b5bc76f86aaa431d27"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a39efc3ade8c1fb27c097fd112baf09d7fd70b8cb10ef1de4da6efbe066d381d"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:07bbf9bdcb82239f319b1f09e8ef4bdfaec50ed7d7ea51a56438f39193271163"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:807b3096205c7cec861c8803a6738e33ed86c9aae76cac0e19454245a6bbbc0a"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:1233e303645f468e399ec906b6b48ab7cd8391aae2d08daadbb5cad6ace4bd87"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:cb2126603091902767d96bcb74093bd8b14982f41809f85c9b96e519c7e1dc41"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-win32.whl", hash = "sha256:f52010e0a44e3d8530437e7da38d11fb822acfb0d5b12e9cd5ba655509937ca0"}, - {file = "hiredis-2.0.0.tar.gz", hash = "sha256:81d6d8e39695f2c37954d1011c0480ef7cf444d4e3ae24bc5e89ee5de360139a"}, -] -hyperlink = [ - {file = "hyperlink-21.0.0-py2.py3-none-any.whl", hash = "sha256:e6b14c37ecb73e89c77d78cdb4c2cc8f3fb59a885c5b3f819ff4ed80f25af1b4"}, - {file = "hyperlink-21.0.0.tar.gz", hash = "sha256:427af957daa58bc909471c6c40f74c5450fa123dd093fc53efd2e91d2705a56b"}, -] -idna = [ - {file = 
"idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, - {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, -] -ijson = [ - {file = "ijson-3.1.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6c1a777096be5f75ffebb335c6d2ebc0e489b231496b7f2ca903aa061fe7d381"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:475fc25c3d2a86230b85777cae9580398b42eed422506bf0b6aacfa936f7bfcd"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f587699b5a759e30accf733e37950cc06c4118b72e3e146edcea77dded467426"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:339b2b4c7bbd64849dd69ef94ee21e29dcd92c831f47a281fdd48122bb2a715a"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:446ef8980504da0af8d20d3cb6452c4dc3d8aa5fd788098985e899b913191fe6"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:3997a2fdb28bc04b9ab0555db5f3b33ed28d91e9d42a3bf2c1842d4990beb158"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:fa10a1d88473303ec97aae23169d77c5b92657b7fb189f9c584974c00a79f383"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:9a5bf5b9d8f2ceaca131ee21fc7875d0f34b95762f4f32e4d65109ca46472147"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:81cc8cee590c8a70cca3c9aefae06dd7cb8e9f75f3a7dc12b340c2e332d33a2a"}, - {file = "ijson-3.1.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ea5fc50ba158f72943d5174fbc29ebefe72a2adac051c814c87438dc475cf78"}, - {file = "ijson-3.1.4-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:3b98861a4280cf09d267986cefa46c3bd80af887eae02aba07488d80eb798afa"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:068c692efba9692406b86736dcc6803e4a0b6280d7f0b7534bff3faec677ff38"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:86884ac06ac69cea6d89ab7b84683b3b4159c4013e4a20276d3fc630fe9b7588"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:41e5886ff6fade26f10b87edad723d2db14dcbb1178717790993fcbbb8ccd333"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:24b58933bf777d03dc1caa3006112ec7f9e6f6db6ffe1f5f5bd233cb1281f719"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:13f80aad0b84d100fb6a88ced24bade21dc6ddeaf2bba3294b58728463194f50"}, - {file = "ijson-3.1.4-cp35-cp35m-win32.whl", hash = "sha256:fa9a25d0bd32f9515e18a3611690f1de12cb7d1320bd93e9da835936b41ad3ff"}, - {file = "ijson-3.1.4-cp35-cp35m-win_amd64.whl", hash = "sha256:c4c1bf98aaab4c8f60d238edf9bcd07c896cfcc51c2ca84d03da22aad88957c5"}, - {file = "ijson-3.1.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f0f2a87c423e8767368aa055310024fa28727f4454463714fef22230c9717f64"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:15507de59d74d21501b2a076d9c49abf927eb58a51a01b8f28a0a0565db0a99f"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2e6bd6ad95ab40c858592b905e2bbb4fe79bbff415b69a4923dafe841ffadcb4"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:68e295bb12610d086990cedc89fb8b59b7c85740d66e9515aed062649605d0bf"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:3bb461352c0f0f2ec460a4b19400a665b8a5a3a2da663a32093df1699642ee3f"}, - {file = 
"ijson-3.1.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f91c75edd6cf1a66f02425bafc59a22ec29bc0adcbc06f4bfd694d92f424ceb3"}, - {file = "ijson-3.1.4-cp36-cp36m-win32.whl", hash = "sha256:4c53cc72f79a4c32d5fc22efb85aa22f248e8f4f992707a84bdc896cc0b1ecf9"}, - {file = "ijson-3.1.4-cp36-cp36m-win_amd64.whl", hash = "sha256:ac9098470c1ff6e5c23ec0946818bc102bfeeeea474554c8d081dc934be20988"}, - {file = "ijson-3.1.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dcd6f04df44b1945b859318010234651317db2c4232f75e3933f8bb41c4fa055"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:5a2f40c053c837591636dc1afb79d85e90b9a9d65f3d9963aae31d1eb11bfed2"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f50337e3b8e72ec68441b573c2848f108a8976a57465c859b227ebd2a2342901"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:454918f908abbed3c50a0a05c14b20658ab711b155e4f890900e6f60746dd7cc"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:387c2ec434cc1bc7dc9bd33ec0b70d95d443cc1e5934005f26addc2284a437ab"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:179ed6fd42e121d252b43a18833df2de08378fac7bce380974ef6f5e522afefa"}, - {file = "ijson-3.1.4-cp37-cp37m-win32.whl", hash = "sha256:26a6a550b270df04e3f442e2bf0870c9362db4912f0e7bdfd300f30ea43115a2"}, - {file = "ijson-3.1.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ff8cf7507d9d8939264068c2cff0a23f99703fa2f31eb3cb45a9a52798843586"}, - {file = "ijson-3.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:09c9d7913c88a6059cd054ff854958f34d757402b639cf212ffbec201a705a0d"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:702ba9a732116d659a5e950ee176be6a2e075998ef1bcde11cbf79a77ed0f717"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:667841591521158770adc90793c2bdbb47c94fe28888cb802104b8bbd61f3d51"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:df641dd07b38c63eecd4f454db7b27aa5201193df160f06b48111ba97ab62504"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:9348e7d507eb40b52b12eecff3d50934fcc3d2a15a2f54ec1127a36063b9ba8f"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:93455902fdc33ba9485c7fae63ac95d96e0ab8942224a357113174bbeaff92e9"}, - {file = "ijson-3.1.4-cp38-cp38-win32.whl", hash = "sha256:5b725f2e984ce70d464b195f206fa44bebbd744da24139b61fec72de77c03a16"}, - {file = "ijson-3.1.4-cp38-cp38-win_amd64.whl", hash = "sha256:a5965c315fbb2dc9769dfdf046eb07daf48ae20b637da95ec8d62b629be09df4"}, - {file = "ijson-3.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b8ee7dbb07cec9ba29d60cfe4954b3cc70adb5f85bba1f72225364b59c1cf82b"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d9e01c55d501e9c3d686b6ee3af351c9c0c8c3e45c5576bd5601bee3e1300b09"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:297f26f27a04cd0d0a2f865d154090c48ea11b239cabe0a17a6c65f0314bd1ca"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:9239973100338a4138d09d7a4602bd289861e553d597cd67390c33bfc452253e"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:2a64c66a08f56ed45a805691c2fd2e1caef00edd6ccf4c4e5eff02cd94ad8364"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d17fd199f0d0a4ab6e0d541b4eec1b68b5bd5bb5d8104521e22243015b51049b"}, - {file = "ijson-3.1.4-cp39-cp39-win32.whl", hash = 
"sha256:70ee3c8fa0eba18c80c5911639c01a8de4089a4361bad2862a9949e25ec9b1c8"}, - {file = "ijson-3.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:6bf2b64304321705d03fa5e403ec3f36fa5bb27bf661849ad62e0a3a49bc23e3"}, - {file = "ijson-3.1.4-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:5d7e3fcc3b6de76a9dba1e9fc6ca23dad18f0fa6b4e6499415e16b684b2e9af1"}, - {file = "ijson-3.1.4-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:a72eb0359ebff94754f7a2f00a6efe4c57716f860fc040c606dedcb40f49f233"}, - {file = "ijson-3.1.4-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:28fc168f5faf5759fdfa2a63f85f1f7a148bbae98f34404a6ba19f3d08e89e87"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2844d4a38d27583897ed73f7946e205b16926b4cab2525d1ce17e8b08064c706"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:252defd1f139b5fb8c764d78d5e3a6df81543d9878c58992a89b261369ea97a7"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:15d5356b4d090c699f382c8eb6a2bcd5992a8c8e8b88c88bc6e54f686018328a"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-win32.whl", hash = "sha256:6774ec0a39647eea70d35fb76accabe3d71002a8701c0545b9120230c182b75b"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f11da15ec04cc83ff0f817a65a3392e169be8d111ba81f24d6e09236597bb28c"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:ee13ceeed9b6cf81b3b8197ef15595fc43fd54276842ed63840ddd49db0603da"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:97e4df67235fae40d6195711223520d2c5bf1f7f5087c2963fcde44d72ebf448"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-win32.whl", hash = "sha256:3d10eee52428f43f7da28763bb79f3d90bbbeea1accb15de01e40a00885b6e89"}, - {file = "ijson-3.1.4.tar.gz", hash = "sha256:1d1003ae3c6115ec9b587d29dd136860a81a23c7626b682e2b5b12c9fd30e4ea"}, -] -importlib-metadata = [ - {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"}, - {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"}, -] -importlib-resources = [ - {file = "importlib_resources-5.4.0-py3-none-any.whl", hash = "sha256:33a95faed5fc19b4bc16b29a6eeae248a3fe69dd55d4d229d2b480e23eeaad45"}, - {file = "importlib_resources-5.4.0.tar.gz", hash = "sha256:d756e2f85dd4de2ba89be0b21dba2a3bbec2e871a42a3a16719258a11f87506b"}, -] -incremental = [ - {file = "incremental-21.3.0-py2.py3-none-any.whl", hash = "sha256:92014aebc6a20b78a8084cdd5645eeaa7f74b8933f70fa3ada2cfbd1e3b54321"}, - {file = "incremental-21.3.0.tar.gz", hash = "sha256:02f5de5aff48f6b9f665d99d48bfc7ec03b6e3943210de7cfc88856d755d6f57"}, -] -isort = [ - {file = "isort-5.11.4-py3-none-any.whl", hash = "sha256:c033fd0edb91000a7f09527fe5c75321878f98322a77ddcc81adbd83724afb7b"}, - {file = "isort-5.11.4.tar.gz", hash = "sha256:6db30c5ded9815d813932c04c2f85a360bcdd35fed496f4d8f35495ef0a261b6"}, -] -jaeger-client = [ - {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, -] -jeepney = [ - {file = "jeepney-0.7.1-py3-none-any.whl", hash = "sha256:1b5a0ea5c0e7b166b2f5895b91a08c14de8915afda4407fb5022a195224958ac"}, - {file = "jeepney-0.7.1.tar.gz", hash = "sha256:fa9e232dfa0c498bd0b8a3a73b8d8a31978304dcef0515adc859d4e096f96f4f"}, -] -jinja2 = [ - {file = "Jinja2-3.1.2-py3-none-any.whl", hash = 
"sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, - {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, -] -jsonschema = [ - {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, - {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, -] -keyring = [ - {file = "keyring-23.5.0-py3-none-any.whl", hash = "sha256:b0d28928ac3ec8e42ef4cc227822647a19f1d544f21f96457965dc01cf555261"}, - {file = "keyring-23.5.0.tar.gz", hash = "sha256:9012508e141a80bd1c0b6778d5c610dd9f8c464d75ac6774248500503f972fb9"}, -] -ldap3 = [ - {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, - {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, -] -lxml = [ - {file = "lxml-4.9.2-cp27-cp27m-macosx_10_15_x86_64.whl", hash = "sha256:76cf573e5a365e790396a5cc2b909812633409306c6531a6877c59061e42c4f2"}, - {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b1f42b6921d0e81b1bcb5e395bc091a70f41c4d4e55ba99c6da2b31626c44892"}, - {file = "lxml-4.9.2-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9f102706d0ca011de571de32c3247c6476b55bb6bc65a20f682f000b07a4852a"}, - {file = "lxml-4.9.2-cp27-cp27m-win32.whl", hash = "sha256:8d0b4612b66ff5d62d03bcaa043bb018f74dfea51184e53f067e6fdcba4bd8de"}, - {file = "lxml-4.9.2-cp27-cp27m-win_amd64.whl", hash = "sha256:4c8f293f14abc8fd3e8e01c5bd86e6ed0b6ef71936ded5bf10fe7a5efefbaca3"}, - {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2899456259589aa38bfb018c364d6ae7b53c5c22d8e27d0ec7609c2a1ff78b50"}, - {file = "lxml-4.9.2-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6749649eecd6a9871cae297bffa4ee76f90b4504a2a2ab528d9ebe912b101975"}, - {file = "lxml-4.9.2-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:a08cff61517ee26cb56f1e949cca38caabe9ea9fbb4b1e10a805dc39844b7d5c"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:85cabf64adec449132e55616e7ca3e1000ab449d1d0f9d7f83146ed5bdcb6d8a"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:8340225bd5e7a701c0fa98284c849c9b9fc9238abf53a0ebd90900f25d39a4e4"}, - {file = "lxml-4.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:1ab8f1f932e8f82355e75dda5413a57612c6ea448069d4fb2e217e9a4bed13d4"}, - {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:699a9af7dffaf67deeae27b2112aa06b41c370d5e7633e0ee0aea2e0b6c211f7"}, - {file = "lxml-4.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b9cc34af337a97d470040f99ba4282f6e6bac88407d021688a5d585e44a23184"}, - {file = "lxml-4.9.2-cp310-cp310-win32.whl", hash = "sha256:d02a5399126a53492415d4906ab0ad0375a5456cc05c3fc0fc4ca11771745cda"}, - {file = "lxml-4.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:a38486985ca49cfa574a507e7a2215c0c780fd1778bb6290c21193b7211702ab"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:c83203addf554215463b59f6399835201999b5e48019dc17f182ed5ad87205c9"}, - {file = 
"lxml-4.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2a87fa548561d2f4643c99cd13131acb607ddabb70682dcf1dff5f71f781a4bf"}, - {file = "lxml-4.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:d6b430a9938a5a5d85fc107d852262ddcd48602c120e3dbb02137c83d212b380"}, - {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3efea981d956a6f7173b4659849f55081867cf897e719f57383698af6f618a92"}, - {file = "lxml-4.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:df0623dcf9668ad0445e0558a21211d4e9a149ea8f5666917c8eeec515f0a6d1"}, - {file = "lxml-4.9.2-cp311-cp311-win32.whl", hash = "sha256:da248f93f0418a9e9d94b0080d7ebc407a9a5e6d0b57bb30db9b5cc28de1ad33"}, - {file = "lxml-4.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:3818b8e2c4b5148567e1b09ce739006acfaa44ce3156f8cbbc11062994b8e8dd"}, - {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ca989b91cf3a3ba28930a9fc1e9aeafc2a395448641df1f387a2d394638943b0"}, - {file = "lxml-4.9.2-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:822068f85e12a6e292803e112ab876bc03ed1f03dddb80154c395f891ca6b31e"}, - {file = "lxml-4.9.2-cp35-cp35m-win32.whl", hash = "sha256:be7292c55101e22f2a3d4d8913944cbea71eea90792bf914add27454a13905df"}, - {file = "lxml-4.9.2-cp35-cp35m-win_amd64.whl", hash = "sha256:998c7c41910666d2976928c38ea96a70d1aa43be6fe502f21a651e17483a43c5"}, - {file = "lxml-4.9.2-cp36-cp36m-macosx_10_15_x86_64.whl", hash = "sha256:b26a29f0b7fc6f0897f043ca366142d2b609dc60756ee6e4e90b5f762c6adc53"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:ab323679b8b3030000f2be63e22cdeea5b47ee0abd2d6a1dc0c8103ddaa56cd7"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:689bb688a1db722485e4610a503e3e9210dcc20c520b45ac8f7533c837be76fe"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:f49e52d174375a7def9915c9f06ec4e569d235ad428f70751765f48d5926678c"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:36c3c175d34652a35475a73762b545f4527aec044910a651d2bf50de9c3352b1"}, - {file = "lxml-4.9.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a35f8b7fa99f90dd2f5dc5a9fa12332642f087a7641289ca6c40d6e1a2637d8e"}, - {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:58bfa3aa19ca4c0f28c5dde0ff56c520fbac6f0daf4fac66ed4c8d2fb7f22e74"}, - {file = "lxml-4.9.2-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bc718cd47b765e790eecb74d044cc8d37d58562f6c314ee9484df26276d36a38"}, - {file = "lxml-4.9.2-cp36-cp36m-win32.whl", hash = "sha256:d5bf6545cd27aaa8a13033ce56354ed9e25ab0e4ac3b5392b763d8d04b08e0c5"}, - {file = "lxml-4.9.2-cp36-cp36m-win_amd64.whl", hash = "sha256:3ab9fa9d6dc2a7f29d7affdf3edebf6ece6fb28a6d80b14c3b2fb9d39b9322c3"}, - {file = "lxml-4.9.2-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:05ca3f6abf5cf78fe053da9b1166e062ade3fa5d4f92b4ed688127ea7d7b1d03"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:a5da296eb617d18e497bcf0a5c528f5d3b18dadb3619fbdadf4ed2356ef8d941"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:04876580c050a8c5341d706dd464ff04fd597095cc8c023252566a8826505726"}, - 
{file = "lxml-4.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:c9ec3eaf616d67db0764b3bb983962b4f385a1f08304fd30c7283954e6a7869b"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2a29ba94d065945944016b6b74e538bdb1751a1db6ffb80c9d3c2e40d6fa9894"}, - {file = "lxml-4.9.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:a82d05da00a58b8e4c0008edbc8a4b6ec5a4bc1e2ee0fb6ed157cf634ed7fa45"}, - {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:223f4232855ade399bd409331e6ca70fb5578efef22cf4069a6090acc0f53c0e"}, - {file = "lxml-4.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d17bc7c2ccf49c478c5bdd447594e82692c74222698cfc9b5daae7ae7e90743b"}, - {file = "lxml-4.9.2-cp37-cp37m-win32.whl", hash = "sha256:b64d891da92e232c36976c80ed7ebb383e3f148489796d8d31a5b6a677825efe"}, - {file = "lxml-4.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:a0a336d6d3e8b234a3aae3c674873d8f0e720b76bc1d9416866c41cd9500ffb9"}, - {file = "lxml-4.9.2-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:da4dd7c9c50c059aba52b3524f84d7de956f7fef88f0bafcf4ad7dde94a064e8"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:821b7f59b99551c69c85a6039c65b75f5683bdc63270fec660f75da67469ca24"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:e5168986b90a8d1f2f9dc1b841467c74221bd752537b99761a93d2d981e04889"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:8e20cb5a47247e383cf4ff523205060991021233ebd6f924bca927fcf25cf86f"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:13598ecfbd2e86ea7ae45ec28a2a54fb87ee9b9fdb0f6d343297d8e548392c03"}, - {file = "lxml-4.9.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:880bbbcbe2fca64e2f4d8e04db47bcdf504936fa2b33933efd945e1b429bea8c"}, - {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7d2278d59425777cfcb19735018d897ca8303abe67cc735f9f97177ceff8027f"}, - {file = "lxml-4.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5344a43228767f53a9df6e5b253f8cdca7dfc7b7aeae52551958192f56d98457"}, - {file = "lxml-4.9.2-cp38-cp38-win32.whl", hash = "sha256:925073b2fe14ab9b87e73f9a5fde6ce6392da430f3004d8b72cc86f746f5163b"}, - {file = "lxml-4.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:9b22c5c66f67ae00c0199f6055705bc3eb3fcb08d03d2ec4059a2b1b25ed48d7"}, - {file = "lxml-4.9.2-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:5f50a1c177e2fa3ee0667a5ab79fdc6b23086bc8b589d90b93b4bd17eb0e64d1"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:090c6543d3696cbe15b4ac6e175e576bcc3f1ccfbba970061b7300b0c15a2140"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:63da2ccc0857c311d764e7d3d90f429c252e83b52d1f8f1d1fe55be26827d1f4"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:5b4545b8a40478183ac06c073e81a5ce4cf01bf1734962577cf2bb569a5b3bbf"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2e430cd2824f05f2d4f687701144556646bae8f249fd60aa1e4c768ba7018947"}, - {file = "lxml-4.9.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = 
"sha256:6804daeb7ef69e7b36f76caddb85cccd63d0c56dedb47555d2fc969e2af6a1a5"}, - {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:a6e441a86553c310258aca15d1c05903aaf4965b23f3bc2d55f200804e005ee5"}, - {file = "lxml-4.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ca34efc80a29351897e18888c71c6aca4a359247c87e0b1c7ada14f0ab0c0fb2"}, - {file = "lxml-4.9.2-cp39-cp39-win32.whl", hash = "sha256:6b418afe5df18233fc6b6093deb82a32895b6bb0b1155c2cdb05203f583053f1"}, - {file = "lxml-4.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:f1496ea22ca2c830cbcbd473de8f114a320da308438ae65abad6bab7867fe38f"}, - {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b264171e3143d842ded311b7dccd46ff9ef34247129ff5bf5066123c55c2431c"}, - {file = "lxml-4.9.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0dc313ef231edf866912e9d8f5a042ddab56c752619e92dfd3a2c277e6a7299a"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-macosx_10_15_x86_64.whl", hash = "sha256:16efd54337136e8cd72fb9485c368d91d77a47ee2d42b057564aae201257d419"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:0f2b1e0d79180f344ff9f321327b005ca043a50ece8713de61d1cb383fb8ac05"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:7b770ed79542ed52c519119473898198761d78beb24b107acf3ad65deae61f1f"}, - {file = "lxml-4.9.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:efa29c2fe6b4fdd32e8ef81c1528506895eca86e1d8c4657fda04c9b3786ddf9"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7e91ee82f4199af8c43d8158024cbdff3d931df350252288f0d4ce656df7f3b5"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_24_i686.whl", hash = "sha256:b23e19989c355ca854276178a0463951a653309fb8e57ce674497f2d9f208746"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:01d36c05f4afb8f7c20fd9ed5badca32a2029b93b1750f571ccc0b142531caf7"}, - {file = "lxml-4.9.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7b515674acfdcadb0eb5d00d8a709868173acece5cb0be3dd165950cbfdf5409"}, - {file = "lxml-4.9.2.tar.gz", hash = "sha256:2455cfaeb7ac70338b3257f41e21f0724f4b5b0c0e7702da67ee6c3640835b67"}, -] -markupsafe = [ - {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3028252424c72b2602a323f70fbf50aa80a5d3aa616ea6add4ba21ae9cc9da4c"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:290b02bab3c9e216da57c1d11d2ba73a9f73a614bbdcc027d299a60cdfabb11a"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e104c0c2b4cd765b4e83909cde7ec61a1e313f8a75775897db321450e928cce"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24c3be29abb6b34052fd26fc7a8e0a49b1ee9d282e3665e8ad09a0a68faee5b3"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:204730fd5fe2fe3b1e9ccadb2bd18ba8712b111dcabce185af0b3b5285a7c989"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d3b64c65328cb4cd252c94f83e66e3d7acf8891e60ebf588d7b493a55a1dbf26"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:96de1932237abe0a13ba68b63e94113678c379dca45afa040a17b6e1ad7ed076"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:75bb36f134883fdbe13d8e63b8675f5f12b80bb6627f7714c7d6c5becf22719f"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-win32.whl", hash = "sha256:4056f752015dfa9828dce3140dbadd543b555afb3252507348c493def166d454"}, - {file = "MarkupSafe-2.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:d4e702eea4a2903441f2735799d217f4ac1b55f7d8ad96ab7d4e25417cb0827c"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:f0eddfcabd6936558ec020130f932d479930581171368fd728efcfb6ef0dd357"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ddea4c352a488b5e1069069f2f501006b1a4362cb906bee9a193ef1245a7a61"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:09c86c9643cceb1d87ca08cdc30160d1b7ab49a8a21564868921959bd16441b8"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0a0abef2ca47b33fb615b491ce31b055ef2430de52c5b3fb19a4042dbc5cadb"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:736895a020e31b428b3382a7887bfea96102c529530299f426bf2e636aacec9e"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:679cbb78914ab212c49c67ba2c7396dc599a8479de51b9a87b174700abd9ea49"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:84ad5e29bf8bab3ad70fd707d3c05524862bddc54dc040982b0dbcff36481de7"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-win32.whl", hash = "sha256:8da5924cb1f9064589767b0f3fc39d03e3d0fb5aa29e0cb21d43106519bd624a"}, - {file = "MarkupSafe-2.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:454ffc1cbb75227d15667c09f164a0099159da0c1f3d2636aa648f12675491ad"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:142119fb14a1ef6d758912b25c4e803c3ff66920635c44078666fe7cc3f8f759"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b2a5a856019d2833c56a3dcac1b80fe795c95f401818ea963594b345929dffa7"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d1fb9b2eec3c9714dd936860850300b51dbaa37404209c8d4cb66547884b7ed"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62c0285e91414f5c8f621a17b69fc0088394ccdaa961ef469e833dbff64bd5ea"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fc3150f85e2dbcf99e65238c842d1cfe69d3e7649b19864c1cc043213d9cd730"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f02cf7221d5cd915d7fa58ab64f7ee6dd0f6cddbb48683debf5d04ae9b1c2cc1"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d5653619b3eb5cbd35bfba3c12d575db2a74d15e0e1c08bf1db788069d410ce8"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7d2f5d97fcbd004c03df8d8fe2b973fe2b14e7bfeb2cfa012eaa8759ce9a762f"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-win32.whl", hash = "sha256:3cace1837bc84e63b3fd2dfce37f08f8c18aeb81ef5cf6bb9b51f625cb4e6cd8"}, - {file = "MarkupSafe-2.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:fabbe18087c3d33c5824cb145ffca52eccd053061df1d79d4b66dafa5ad2a5ea"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:023af8c54fe63530545f70dd2a2a7eed18d07a9a77b94e8bf1e2ff7f252db9a3"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d66624f04de4af8bbf1c7f21cc06649c1c69a7f84109179add573ce35e46d448"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c532d5ab79be0199fa2658e24a02fce8542df196e60665dd322409a03db6a52c"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e67ec74fada3841b8c5f4c4f197bea916025cb9aa3fe5abf7d52b655d042f956"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c653fde75a6e5eb814d2a0a89378f83d1d3f502ab710904ee585c38888816c"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:961eb86e5be7d0973789f30ebcf6caab60b844203f4396ece27310295a6082c7"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:598b65d74615c021423bd45c2bc5e9b59539c875a9bdb7e5f2a6b92dfcfc268d"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:599941da468f2cf22bf90a84f6e2a65524e87be2fce844f96f2dd9a6c9d1e635"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-win32.whl", hash = "sha256:e6f7f3f41faffaea6596da86ecc2389672fa949bd035251eab26dc6697451d05"}, - {file = "MarkupSafe-2.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:b8811d48078d1cf2a6863dafb896e68406c5f513048451cd2ded0473133473c7"}, - {file = "MarkupSafe-2.1.0.tar.gz", hash = "sha256:80beaf63ddfbc64a0452b841d8036ca0611e049650e20afcb882f5d3c266d65f"}, -] -matrix-common = [ - {file = "matrix_common-1.3.0-py3-none-any.whl", hash = "sha256:524e2785b9b03be4d15f3a8a6b857c5b6af68791ffb1b9918f0ad299abc4db20"}, - {file = "matrix_common-1.3.0.tar.gz", hash = "sha256:62e121cccd9f243417b57ec37a76dc44aeb198a7a5c67afd6b8275992ff2abd1"}, -] -matrix-synapse-ldap3 = [ - {file = "matrix-synapse-ldap3-0.2.2.tar.gz", hash = "sha256:b388d95693486eef69adaefd0fd9e84463d52fe17b0214a00efcaa669b73cb74"}, - {file = "matrix_synapse_ldap3-0.2.2-py3-none-any.whl", hash = "sha256:66ee4c85d7952c6c27fd04c09cdfdf4847b8e8b7d6a7ada6ba1100013bda060f"}, -] -msgpack = [ - {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"}, - {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"}, - {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = 
"sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"}, - {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"}, - {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"}, - {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"}, - {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"}, - {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"}, - {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"}, - {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"}, - {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"}, - {file = 
"msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"}, - {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"}, - {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"}, - {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"}, - {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"}, - {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"}, -] -mypy = [ - {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"}, - {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"}, - {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"}, - {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"}, - {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"}, - {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"}, - {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"}, - {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"}, - {file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"}, - {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"}, - {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"}, - {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"}, - {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"}, - {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"}, - {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"}, - {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"}, - {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"}, - {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"}, - {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"}, - {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"}, - {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"}, - {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"}, - {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"}, - {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"}, -] -mypy-extensions = [ - {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, - {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, -] -mypy-zope = [ - {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"}, - {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"}, -] -netaddr = [ - {file = "netaddr-0.8.0-py2.py3-none-any.whl", hash = 
"sha256:9666d0232c32d2656e5e5f8d735f58fd6c7457ce52fc21c98d45f2af78f990ac"}, - {file = "netaddr-0.8.0.tar.gz", hash = "sha256:d6cc57c7a07b1d9d2e917aa8b36ae8ce61c35ba3fcd1b83ca31c5a0ee2b5a243"}, -] -opentracing = [ - {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, -] -packaging = [ - {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"}, - {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"}, -] -parameterized = [ - {file = "parameterized-0.8.1-py2.py3-none-any.whl", hash = "sha256:9cbb0b69a03e8695d68b3399a8a5825200976536fe1cb79db60ed6a4c8c9efe9"}, - {file = "parameterized-0.8.1.tar.gz", hash = "sha256:41bbff37d6186430f77f900d777e5bb6a24928a1c46fb1de692f8b52b8833b5c"}, -] -pathspec = [ - {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"}, - {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, -] -phonenumbers = [ - {file = "phonenumbers-8.13.2-py2.py3-none-any.whl", hash = "sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf"}, - {file = "phonenumbers-8.13.2.tar.gz", hash = "sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a"}, -] -pillow = [ - {file = "Pillow-9.4.0-1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1b4b4e9dda4f4e4c4e6896f93e84a8f0bcca3b059de9ddf67dac3c334b1195e1"}, - {file = "Pillow-9.4.0-1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:fb5c1ad6bad98c57482236a21bf985ab0ef42bd51f7ad4e4538e89a997624e12"}, - {file = "Pillow-9.4.0-1-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:f0caf4a5dcf610d96c3bd32932bfac8aee61c96e60481c2a0ea58da435e25acd"}, - {file = "Pillow-9.4.0-1-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:3f4cc516e0b264c8d4ccd6b6cbc69a07c6d582d8337df79be1e15a5056b258c9"}, - {file = "Pillow-9.4.0-1-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:b8c2f6eb0df979ee99433d8b3f6d193d9590f735cf12274c108bd954e30ca858"}, - {file = "Pillow-9.4.0-1-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b70756ec9417c34e097f987b4d8c510975216ad26ba6e57ccb53bc758f490dab"}, - {file = "Pillow-9.4.0-1-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:43521ce2c4b865d385e78579a082b6ad1166ebed2b1a2293c3be1d68dd7ca3b9"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:2968c58feca624bb6c8502f9564dd187d0e1389964898f5e9e1fbc8533169157"}, - {file = "Pillow-9.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c5c1362c14aee73f50143d74389b2c158707b4abce2cb055b7ad37ce60738d47"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd752c5ff1b4a870b7661234694f24b1d2b9076b8bf337321a814c612665f343"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9a3049a10261d7f2b6514d35bbb7a4dfc3ece4c4de14ef5876c4b7a23a0e566d"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16a8df99701f9095bea8a6c4b3197da105df6f74e6176c5b410bc2df2fd29a57"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:94cdff45173b1919350601f82d61365e792895e3c3a3443cf99819e6fbf717a5"}, - {file = "Pillow-9.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = 
"sha256:ed3e4b4e1e6de75fdc16d3259098de7c6571b1a6cc863b1a49e7d3d53e036070"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d5b2f8a31bd43e0f18172d8ac82347c8f37ef3e0b414431157718aa234991b28"}, - {file = "Pillow-9.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:09b89ddc95c248ee788328528e6a2996e09eaccddeeb82a5356e92645733be35"}, - {file = "Pillow-9.4.0-cp310-cp310-win32.whl", hash = "sha256:f09598b416ba39a8f489c124447b007fe865f786a89dbfa48bb5cf395693132a"}, - {file = "Pillow-9.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:f6e78171be3fb7941f9910ea15b4b14ec27725865a73c15277bc39f5ca4f8391"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:3fa1284762aacca6dc97474ee9c16f83990b8eeb6697f2ba17140d54b453e133"}, - {file = "Pillow-9.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:eaef5d2de3c7e9b21f1e762f289d17b726c2239a42b11e25446abf82b26ac132"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4dfdae195335abb4e89cc9762b2edc524f3c6e80d647a9a81bf81e17e3fb6f0"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6abfb51a82e919e3933eb137e17c4ae9c0475a25508ea88993bb59faf82f3b35"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451f10ef963918e65b8869e17d67db5e2f4ab40e716ee6ce7129b0cde2876eab"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:6663977496d616b618b6cfa43ec86e479ee62b942e1da76a2c3daa1c75933ef4"}, - {file = "Pillow-9.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:60e7da3a3ad1812c128750fc1bc14a7ceeb8d29f77e0a2356a8fb2aa8925287d"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19005a8e58b7c1796bc0167862b1f54a64d3b44ee5d48152b06bb861458bc0f8"}, - {file = "Pillow-9.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f715c32e774a60a337b2bb8ad9839b4abf75b267a0f18806f6f4f5f1688c4b5a"}, - {file = "Pillow-9.4.0-cp311-cp311-win32.whl", hash = "sha256:b222090c455d6d1a64e6b7bb5f4035c4dff479e22455c9eaa1bdd4c75b52c80c"}, - {file = "Pillow-9.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:ba6612b6548220ff5e9df85261bddc811a057b0b465a1226b39bfb8550616aee"}, - {file = "Pillow-9.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:5f532a2ad4d174eb73494e7397988e22bf427f91acc8e6ebf5bb10597b49c493"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dd5a9c3091a0f414a963d427f920368e2b6a4c2f7527fdd82cde8ef0bc7a327"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef21af928e807f10bf4141cad4746eee692a0dd3ff56cfb25fce076ec3cc8abe"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:847b114580c5cc9ebaf216dd8c8dbc6b00a3b7ab0131e173d7120e6deade1f57"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_aarch64.whl", hash = "sha256:653d7fb2df65efefbcbf81ef5fe5e5be931f1ee4332c2893ca638c9b11a409c4"}, - {file = "Pillow-9.4.0-cp37-cp37m-manylinux_2_28_x86_64.whl", hash = "sha256:46f39cab8bbf4a384ba7cb0bc8bae7b7062b6a11cfac1ca4bc144dea90d4a9f5"}, - {file = "Pillow-9.4.0-cp37-cp37m-win32.whl", hash = "sha256:7ac7594397698f77bce84382929747130765f66406dc2cd8b4ab4da68ade4c6e"}, - {file = "Pillow-9.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:46c259e87199041583658457372a183636ae8cd56dbf3f0755e0f376a7f9d0e6"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = 
"sha256:0e51f608da093e5d9038c592b5b575cadc12fd748af1479b5e858045fff955a9"}, - {file = "Pillow-9.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:765cb54c0b8724a7c12c55146ae4647e0274a839fb6de7bcba841e04298e1011"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:519e14e2c49fcf7616d6d2cfc5c70adae95682ae20f0395e9280db85e8d6c4df"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d197df5489004db87d90b918033edbeee0bd6df3848a204bca3ff0a903bef837"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0845adc64fe9886db00f5ab68c4a8cd933ab749a87747555cec1c95acea64b0b"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:e1339790c083c5a4de48f688b4841f18df839eb3c9584a770cbd818b33e26d5d"}, - {file = "Pillow-9.4.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:a96e6e23f2b79433390273eaf8cc94fec9c6370842e577ab10dabdcc7ea0a66b"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7cfc287da09f9d2a7ec146ee4d72d6ea1342e770d975e49a8621bf54eaa8f30f"}, - {file = "Pillow-9.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d7081c084ceb58278dd3cf81f836bc818978c0ccc770cbbb202125ddabec6628"}, - {file = "Pillow-9.4.0-cp38-cp38-win32.whl", hash = "sha256:df41112ccce5d47770a0c13651479fbcd8793f34232a2dd9faeccb75eb5d0d0d"}, - {file = "Pillow-9.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:7a21222644ab69ddd9967cfe6f2bb420b460dae4289c9d40ff9a4896e7c35c9a"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0f3269304c1a7ce82f1759c12ce731ef9b6e95b6df829dccd9fe42912cc48569"}, - {file = "Pillow-9.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cb362e3b0976dc994857391b776ddaa8c13c28a16f80ac6522c23d5257156bed"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2e0f87144fcbbe54297cae708c5e7f9da21a4646523456b00cc956bd4c65815"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28676836c7796805914b76b1837a40f76827ee0d5398f72f7dcc634bae7c6264"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0884ba7b515163a1a05440a138adeb722b8a6ae2c2b33aea93ea3118dd3a899e"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:53dcb50fbdc3fb2c55431a9b30caeb2f7027fcd2aeb501459464f0214200a503"}, - {file = "Pillow-9.4.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:e8c5cf126889a4de385c02a2c3d3aba4b00f70234bfddae82a5eaa3ee6d5e3e6"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:6c6b1389ed66cdd174d040105123a5a1bc91d0aa7059c7261d20e583b6d8cbd2"}, - {file = "Pillow-9.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0dd4c681b82214b36273c18ca7ee87065a50e013112eea7d78c7a1b89a739153"}, - {file = "Pillow-9.4.0-cp39-cp39-win32.whl", hash = "sha256:6d9dfb9959a3b0039ee06c1a1a90dc23bac3b430842dcb97908ddde05870601c"}, - {file = "Pillow-9.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:54614444887e0d3043557d9dbc697dbb16cfb5a35d672b7a0fcc1ed0cf1c600b"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b9b752ab91e78234941e44abdecc07f1f0d8f51fb62941d32995b8161f68cfe5"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3b56206244dc8711f7e8b7d6cad4663917cd5b2d950799425076681e8766286"}, - {file = 
"Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aabdab8ec1e7ca7f1434d042bf8b1e92056245fb179790dc97ed040361f16bfd"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:db74f5562c09953b2c5f8ec4b7dfd3f5421f31811e97d1dbc0a7c93d6e3a24df"}, - {file = "Pillow-9.4.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:e9d7747847c53a16a729b6ee5e737cf170f7a16611c143d95aa60a109a59c336"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b52ff4f4e002f828ea6483faf4c4e8deea8d743cf801b74910243c58acc6eda3"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:575d8912dca808edd9acd6f7795199332696d3469665ef26163cd090fa1f8bfa"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3c4ed2ff6760e98d262e0cc9c9a7f7b8a9f61aa4d47c58835cdaf7b0b8811bb"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e621b0246192d3b9cb1dc62c78cfa4c6f6d2ddc0ec207d43c0dedecb914f152a"}, - {file = "Pillow-9.4.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8f127e7b028900421cad64f51f75c051b628db17fb00e099eb148761eed598c9"}, - {file = "Pillow-9.4.0.tar.gz", hash = "sha256:a1c2d7780448eb93fbcc3789bf3916aa5720d942e37945f4056680317f1cd23e"}, -] -pkginfo = [ - {file = "pkginfo-1.8.2-py2.py3-none-any.whl", hash = "sha256:c24c487c6a7f72c66e816ab1796b96ac6c3d14d49338293d2141664330b55ffc"}, - {file = "pkginfo-1.8.2.tar.gz", hash = "sha256:542e0d0b6750e2e21c20179803e40ab50598d8066d51097a0e382cba9eb02bff"}, -] -pkgutil_resolve_name = [ - {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, - {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, -] -platformdirs = [ - {file = "platformdirs-2.5.1-py3-none-any.whl", hash = "sha256:bcae7cab893c2d310a711b70b24efb93334febe65f8de776ee320b517471e227"}, - {file = "platformdirs-2.5.1.tar.gz", hash = "sha256:7535e70dfa32e84d4b34996ea99c5e432fa29a708d0f4e394bbcb2a8faa4f16d"}, -] -prometheus-client = [ - {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"}, - {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, -] -psycopg2 = [ - {file = "psycopg2-2.9.5-cp310-cp310-win32.whl", hash = "sha256:d3ef67e630b0de0779c42912fe2cbae3805ebaba30cda27fea2a3de650a9414f"}, - {file = "psycopg2-2.9.5-cp310-cp310-win_amd64.whl", hash = "sha256:4cb9936316d88bfab614666eb9e32995e794ed0f8f6b3b718666c22819c1d7ee"}, - {file = "psycopg2-2.9.5-cp311-cp311-win32.whl", hash = "sha256:093e3894d2d3c592ab0945d9eba9d139c139664dcf83a1c440b8a7aa9bb21955"}, - {file = "psycopg2-2.9.5-cp311-cp311-win_amd64.whl", hash = "sha256:920bf418000dd17669d2904472efeab2b20546efd0548139618f8fa305d1d7ad"}, - {file = "psycopg2-2.9.5-cp36-cp36m-win32.whl", hash = "sha256:b9ac1b0d8ecc49e05e4e182694f418d27f3aedcfca854ebd6c05bb1cffa10d6d"}, - {file = "psycopg2-2.9.5-cp36-cp36m-win_amd64.whl", hash = "sha256:fc04dd5189b90d825509caa510f20d1d504761e78b8dfb95a0ede180f71d50e5"}, - {file = "psycopg2-2.9.5-cp37-cp37m-win32.whl", hash = "sha256:922cc5f0b98a5f2b1ff481f5551b95cd04580fd6f0c72d9b22e6c0145a4840e0"}, - {file = "psycopg2-2.9.5-cp37-cp37m-win_amd64.whl", hash = 
"sha256:1e5a38aa85bd660c53947bd28aeaafb6a97d70423606f1ccb044a03a1203fe4a"}, - {file = "psycopg2-2.9.5-cp38-cp38-win32.whl", hash = "sha256:f5b6320dbc3cf6cfb9f25308286f9f7ab464e65cfb105b64cc9c52831748ced2"}, - {file = "psycopg2-2.9.5-cp38-cp38-win_amd64.whl", hash = "sha256:1a5c7d7d577e0eabfcf15eb87d1e19314c8c4f0e722a301f98e0e3a65e238b4e"}, - {file = "psycopg2-2.9.5-cp39-cp39-win32.whl", hash = "sha256:322fd5fca0b1113677089d4ebd5222c964b1760e361f151cbb2706c4912112c5"}, - {file = "psycopg2-2.9.5-cp39-cp39-win_amd64.whl", hash = "sha256:190d51e8c1b25a47484e52a79638a8182451d6f6dff99f26ad9bd81e5359a0fa"}, - {file = "psycopg2-2.9.5.tar.gz", hash = "sha256:a5246d2e683a972e2187a8714b5c2cf8156c064629f9a9b1a873c1730d9e245a"}, -] -psycopg2cffi = [ - {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, -] -psycopg2cffi-compat = [ - {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, -] -pyasn1 = [ - {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"}, - {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"}, -] -pyasn1-modules = [ - {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"}, - {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"}, -] -pycparser = [ - {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, - {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, -] -pydantic = [ - {file = "pydantic-1.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5635de53e6686fe7a44b5cf25fcc419a0d5e5c1a1efe73d49d48fe7586db854"}, - {file = "pydantic-1.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dc1cc241440ed7ca9ab59d9929075445da6b7c94ced281b3dd4cfe6c8cff817"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51bdeb10d2db0f288e71d49c9cefa609bca271720ecd0c58009bd7504a0c464c"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78cec42b95dbb500a1f7120bdf95c401f6abb616bbe8785ef09887306792e66e"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8775d4ef5e7299a2f4699501077a0defdaac5b6c4321173bcb0f3c496fbadf85"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:572066051eeac73d23f95ba9a71349c42a3e05999d0ee1572b7860235b850cc6"}, - {file = "pydantic-1.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:7feb6a2d401f4d6863050f58325b8d99c1e56f4512d98b11ac64ad1751dc647d"}, - {file = "pydantic-1.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39f4a73e5342b25c2959529f07f026ef58147249f9b7431e1ba8414a36761f53"}, - {file = "pydantic-1.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:983e720704431a6573d626b00662eb78a07148c9115129f9b4351091ec95ecc3"}, - {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d52162fe6b2b55964fbb0af2ee58e99791a3138588c482572bb6087953113a"}, - {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:fdf8d759ef326962b4678d89e275ffc55b7ce59d917d9f72233762061fd04a2d"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05a81b006be15655b2a1bae5faa4280cf7c81d0e09fcb49b342ebf826abe5a72"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d88c4c0e5c5dfd05092a4b271282ef0588e5f4aaf345778056fc5259ba098857"}, - {file = "pydantic-1.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:6a05a9db1ef5be0fe63e988f9617ca2551013f55000289c671f71ec16f4985e3"}, - {file = "pydantic-1.10.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:887ca463c3bc47103c123bc06919c86720e80e1214aab79e9b779cda0ff92a00"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdf88ab63c3ee282c76d652fc86518aacb737ff35796023fae56a65ced1a5978"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a48f1953c4a1d9bd0b5167ac50da9a79f6072c63c4cef4cf2a3736994903583e"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a9f2de23bec87ff306aef658384b02aa7c32389766af3c5dee9ce33e80222dfa"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cd8702c5142afda03dc2b1ee6bc358b62b3735b2cce53fc77b31ca9f728e4bc8"}, - {file = "pydantic-1.10.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6e7124d6855b2780611d9f5e1e145e86667eaa3bd9459192c8dc1a097f5e9903"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b53e1d41e97063d51a02821b80538053ee4608b9a181c1005441f1673c55423"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55b1625899acd33229c4352ce0ae54038529b412bd51c4915349b49ca575258f"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:301d626a59edbe5dfb48fcae245896379a450d04baeed50ef40d8199f2733b06"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f9d649892a6f54a39ed56b8dfd5e08b5f3be5f893da430bed76975f3735d15"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7b5a3821225f5c43496c324b0d6875fde910a1c2933d726a743ce328fbb2a8c"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f2f7eb6273dd12472d7f218e1fef6f7c7c2f00ac2e1ecde4db8824c457300416"}, - {file = "pydantic-1.10.4-cp38-cp38-win_amd64.whl", hash = "sha256:4b05697738e7d2040696b0a66d9f0a10bec0efa1883ca75ee9e55baf511909d6"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a9a6747cac06c2beb466064dda999a13176b23535e4c496c9d48e6406f92d42d"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb992a1ef739cc7b543576337bebfc62c0e6567434e522e97291b251a41dad7f"}, - {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:990406d226dea0e8f25f643b370224771878142155b879784ce89f633541a024"}, - {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e82a6d37a95e0b1b42b82ab340ada3963aea1317fd7f888bb6b9dfbf4fff57c"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9193d4f4ee8feca58bc56c8306bcb820f5c7905fd919e0750acdeeeef0615b28"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2b3ce5f16deb45c472dde1a0ee05619298c864a20cded09c4edd820e1454129f"}, - {file = "pydantic-1.10.4-cp39-cp39-win_amd64.whl", hash = 
"sha256:9cbdc268a62d9a98c56e2452d6c41c0263d64a2009aac69246486f01b4f594c4"}, - {file = "pydantic-1.10.4-py3-none-any.whl", hash = "sha256:4948f264678c703f3877d1c8877c4e3b2e12e549c57795107f08cf70c6ec7774"}, - {file = "pydantic-1.10.4.tar.gz", hash = "sha256:b9a3859f24eb4e097502a3be1fb4b2abb79b6103dd9e2e0edb70613a4459a648"}, -] -pygithub = [ - {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"}, - {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"}, -] -pygments = [ - {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, - {file = "Pygments-2.11.2.tar.gz", hash = "sha256:4e426f72023d88d03b2fa258de560726ce890ff3b630f88c21cbb8b2503b8c6a"}, -] -pyicu = [ - {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"}, -] -pyjwt = [ - {file = "PyJWT-2.4.0-py3-none-any.whl", hash = "sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf"}, - {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, -] -pymacaroons = [ - {file = "pymacaroons-0.13.0-py2.py3-none-any.whl", hash = "sha256:3e14dff6a262fdbf1a15e769ce635a8aea72e6f8f91e408f9a97166c53b91907"}, - {file = "pymacaroons-0.13.0.tar.gz", hash = "sha256:1e6bba42a5f66c245adf38a5a4006a99dcc06a0703786ea636098667d42903b8"}, -] -pympler = [ - {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, - {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, -] -pynacl = [ - {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"}, - {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"}, - {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"}, - {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"}, - {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"}, -] -pyopenssl = [ - {file = "pyOpenSSL-23.0.0-py3-none-any.whl", hash = "sha256:df5fc28af899e74e19fccb5510df423581047e10ab6f1f4ba1763ff5fde844c0"}, - {file = "pyOpenSSL-23.0.0.tar.gz", hash = 
"sha256:c1cc5f86bcacefc84dada7d31175cae1b1518d5f60d3d0bb595a67822a868a6f"}, -] -pyrsistent = [ - {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"}, - {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"}, - {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"}, - {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"}, - {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"}, - {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"}, - {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"}, - {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"}, - {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"}, - {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"}, - {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"}, - {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, -] -pysaml2 = [ - {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = 
"sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"}, - {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"}, -] -python-dateutil = [ - {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, - {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, -] -pytz = [ - {file = "pytz-2021.3-py2.py3-none-any.whl", hash = "sha256:3672058bc3453457b622aab7a1c3bfd5ab0bdae451512f6cf25f64ed37f5b87c"}, - {file = "pytz-2021.3.tar.gz", hash = "sha256:acad2d8b20a1af07d4e4c9d2e9285c5ed9104354062f275f3fcd88dcef4f1326"}, -] -pywin32-ctypes = [ - {file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"}, - {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"}, -] -pyyaml = [ - {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, - {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, - {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, - {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, - {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, - {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, - {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, - {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, - {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, - {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, - {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, - {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, - {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, - {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, - {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, - {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, - {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, - {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, - {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, - {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, - {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, - {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, - {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, - {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, - {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, - {file = "PyYAML-6.0.tar.gz", hash = 
"sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, -] -readme-renderer = [ - {file = "readme_renderer-37.2-py3-none-any.whl", hash = "sha256:d3f06a69e8c40fca9ab3174eca48f96d9771eddb43517b17d96583418427b106"}, - {file = "readme_renderer-37.2.tar.gz", hash = "sha256:e8ad25293c98f781dbc2c5a36a309929390009f902f99e1798c761aaf04a7923"}, -] -requests = [ - {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, - {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, -] -requests-toolbelt = [ - {file = "requests-toolbelt-0.9.1.tar.gz", hash = "sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0"}, - {file = "requests_toolbelt-0.9.1-py2.py3-none-any.whl", hash = "sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f"}, -] -rfc3986 = [ - {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"}, - {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"}, -] -rich = [ - {file = "rich-12.6.0-py3-none-any.whl", hash = "sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e"}, - {file = "rich-12.6.0.tar.gz", hash = "sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0"}, -] -ruff = [ - {file = "ruff-0.0.215-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:a4963613bca6ffc448deca1ce3a3fc69af216d6234e5d7f256935d7407088724"}, - {file = "ruff-0.0.215-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:7bcd7b07a88c6530bb4e80850d6cf261081b9d4147eb0ea91fbb85a332ba4fe6"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf3fcbf717a1e0c480b3d1fe9fd823043af463f067ec896746dab2123c4dcf10"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8199acc4a20d2b3761c4489171f45f37654f2d5ce096361221ea392f078b4be0"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f68624344209d07000aba115eeac551f362e278970112f0b69838c70f77f7df"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f3c846df8a83445c394e6be58b8e784ec8fc82d67de94f137026c43e6037958b"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65b13019821af35a3225a64f2c93877c1e8059b92bb13fce32281ceefeecd199"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a02cb67a7171418c5a90ad0d8f983b5fd29b321c9861e0164d126cda4869c61"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d69e654d977842c327f26487ef9b7dba39204b113619d33b4139bd3fdd101c"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ed2a0e13c822f8f0c40e6fe6172ff9c88add55a1dac9e0c05315618f82375648"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1fd4b80bf34e20d18b01bf6d981973975184a85ed39f64934e11d00e2aba882f"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5360b476b8720fa76d9dd6ee980c563b930a08524c91c99edddb25364ef656d7"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:352534c0c2ffd491b331fe5982b1d3e88a6c2083a3c127466d4eed63918f6ea8"}, - {file = "ruff-0.0.215-py3-none-win32.whl", hash = 
"sha256:af3cd199a0c6f5b90b9c84a2b9b74b202901194b8b00d5d3e28a0a814037b73f"}, - {file = "ruff-0.0.215-py3-none-win_amd64.whl", hash = "sha256:aa6fe5b56b17a04c8db7f60fef21a9ff96109d10d9232b436ae2dfdc6cc70b7c"}, - {file = "ruff-0.0.215.tar.gz", hash = "sha256:a82ab1452396d5ca389bdcb182e8f273c5f7db854022d7a303764b6218e9e77e"}, -] -secretstorage = [ - {file = "SecretStorage-3.3.1-py3-none-any.whl", hash = "sha256:422d82c36172d88d6a0ed5afdec956514b189ddbfb72fefab0c8a1cee4eaf71f"}, - {file = "SecretStorage-3.3.1.tar.gz", hash = "sha256:fd666c51a6bf200643495a04abb261f83229dcb6fd8472ec393df7ffc8b6f195"}, -] -semantic-version = [ - {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, - {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, -] -sentry-sdk = [ - {file = "sentry-sdk-1.12.1.tar.gz", hash = "sha256:5bbe4b72de22f9ac1e67f2a4e6efe8fbd595bb59b7b223443f50fe5802a5551c"}, - {file = "sentry_sdk-1.12.1-py2.py3-none-any.whl", hash = "sha256:9f0b960694e2d8bb04db4ba6ac2a645040caef4e762c65937998ff06064f10d6"}, -] -service-identity = [ - {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, - {file = "service_identity-21.1.0-py2.py3-none-any.whl", hash = "sha256:f0b0caac3d40627c3c04d7a51b6e06721857a0e10a8775f2d1d7e72901b3a7db"}, -] -setuptools = [ - {file = "setuptools-65.5.1-py3-none-any.whl", hash = "sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31"}, - {file = "setuptools-65.5.1.tar.gz", hash = "sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f"}, -] -setuptools-rust = [ - {file = "setuptools-rust-1.5.2.tar.gz", hash = "sha256:d8daccb14dc0eae1b6b6eb3ecef79675bd37b4065369f79c35393dd5c55652c7"}, - {file = "setuptools_rust-1.5.2-py3-none-any.whl", hash = "sha256:8eb45851e34288f2296cd5ab9e924535ac1757318b730a13fe6836867843f206"}, -] -signedjson = [ - {file = "signedjson-1.1.4-py3-none-any.whl", hash = "sha256:45569ec54241c65d2403fe3faf7169be5322547706a231e884ca2b427f23d228"}, - {file = "signedjson-1.1.4.tar.gz", hash = "sha256:cd91c56af53f169ef032c62e9c4a3292dc158866933318d0592e3462db3d6492"}, -] -simplejson = [ - {file = "simplejson-3.17.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a89acae02b2975b1f8e4974cb8cdf9bf9f6c91162fb8dec50c259ce700f2770a"}, - {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:82ff356ff91be0ab2293fc6d8d262451eb6ac4fd999244c4b5f863e049ba219c"}, - {file = "simplejson-3.17.6-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:0de783e9c2b87bdd75b57efa2b6260c24b94605b5c9843517577d40ee0c3cc8a"}, - {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:d24a9e61df7a7787b338a58abfba975414937b609eb6b18973e25f573bc0eeeb"}, - {file = "simplejson-3.17.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:e8603e691580487f11306ecb066c76f1f4a8b54fb3bdb23fa40643a059509366"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:9b01e7b00654115965a206e3015f0166674ec1e575198a62a977355597c0bef5"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:37bc0cf0e5599f36072077e56e248f3336917ded1d33d2688624d8ed3cefd7d2"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:cf6e7d5fe2aeb54898df18db1baf479863eae581cce05410f61f6b4188c8ada1"}, - {file = "simplejson-3.17.6-cp27-cp27mu-manylinux2010_x86_64.whl", 
hash = "sha256:bdfc54b4468ed4cd7415928cbe782f4d782722a81aeb0f81e2ddca9932632211"}, - {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dd16302d39c4d6f4afde80edd0c97d4db643327d355a312762ccd9bd2ca515ed"}, - {file = "simplejson-3.17.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:deac4bdafa19bbb89edfb73b19f7f69a52d0b5bd3bb0c4ad404c1bbfd7b4b7fd"}, - {file = "simplejson-3.17.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a8bbdb166e2fb816e43ab034c865147edafe28e1b19c72433147789ac83e2dda"}, - {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7854326920d41c3b5d468154318fe6ba4390cb2410480976787c640707e0180"}, - {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:04e31fa6ac8e326480703fb6ded1488bfa6f1d3f760d32e29dbf66d0838982ce"}, - {file = "simplejson-3.17.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f63600ec06982cdf480899026f4fda622776f5fabed9a869fdb32d72bc17e99a"}, - {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e03c3b8cc7883a54c3f34a6a135c4a17bc9088a33f36796acdb47162791b02f6"}, - {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a2d30d6c1652140181dc6861f564449ad71a45e4f165a6868c27d36745b65d40"}, - {file = "simplejson-3.17.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a1aa6e4cae8e3b8d5321be4f51c5ce77188faf7baa9fe1e78611f93a8eed2882"}, - {file = "simplejson-3.17.6-cp310-cp310-win32.whl", hash = "sha256:97202f939c3ff341fc3fa84d15db86156b1edc669424ba20b0a1fcd4a796a045"}, - {file = "simplejson-3.17.6-cp310-cp310-win_amd64.whl", hash = "sha256:80d3bc9944be1d73e5b1726c3bbfd2628d3d7fe2880711b1eb90b617b9b8ac70"}, - {file = "simplejson-3.17.6-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:9fa621b3c0c05d965882c920347b6593751b7ab20d8fa81e426f1735ca1a9fc7"}, - {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd2fb11922f58df8528adfca123f6a84748ad17d066007e7ac977720063556bd"}, - {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:724c1fe135aa437d5126138d977004d165a3b5e2ee98fc4eb3e7c0ef645e7e27"}, - {file = "simplejson-3.17.6-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ff4ac6ff3aa8f814ac0f50bf218a2e1a434a17aafad4f0400a57a8cc62ef17f"}, - {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:67093a526e42981fdd954868062e56c9b67fdd7e712616cc3265ad0c210ecb51"}, - {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:5d6b4af7ad7e4ac515bc6e602e7b79e2204e25dbd10ab3aa2beef3c5a9cad2c7"}, - {file = "simplejson-3.17.6-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:1c9b1ed7ed282b36571638297525f8ef80f34b3e2d600a56f962c6044f24200d"}, - {file = "simplejson-3.17.6-cp36-cp36m-win32.whl", hash = "sha256:632ecbbd2228575e6860c9e49ea3cc5423764d5aa70b92acc4e74096fb434044"}, - {file = "simplejson-3.17.6-cp36-cp36m-win_amd64.whl", hash = "sha256:4c09868ddb86bf79b1feb4e3e7e4a35cd6e61ddb3452b54e20cf296313622566"}, - {file = "simplejson-3.17.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b6bd8144f15a491c662f06814bd8eaa54b17f26095bb775411f39bacaf66837"}, - {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:5decdc78849617917c206b01e9fc1d694fd58caa961be816cb37d3150d613d9a"}, - {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:521877c7bd060470806eb6335926e27453d740ac1958eaf0d8c00911bc5e1802"}, - {file = "simplejson-3.17.6-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:65b998193bd7b0c7ecdfffbc825d808eac66279313cb67d8892bb259c9d91494"}, - {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ac786f6cb7aa10d44e9641c7a7d16d7f6e095b138795cd43503769d4154e0dc2"}, - {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3ff5b3464e1ce86a8de8c88e61d4836927d5595c2162cab22e96ff551b916e81"}, - {file = "simplejson-3.17.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:69bd56b1d257a91e763256d63606937ae4eb890b18a789b66951c00062afec33"}, - {file = "simplejson-3.17.6-cp37-cp37m-win32.whl", hash = "sha256:b81076552d34c27e5149a40187a8f7e2abb2d3185576a317aaf14aeeedad862a"}, - {file = "simplejson-3.17.6-cp37-cp37m-win_amd64.whl", hash = "sha256:07ecaafc1b1501f275bf5acdee34a4ad33c7c24ede287183ea77a02dc071e0c0"}, - {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:068670af975247acbb9fc3d5393293368cda17026db467bf7a51548ee8f17ee1"}, - {file = "simplejson-3.17.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4d1c135af0c72cb28dd259cf7ba218338f4dc027061262e46fe058b4e6a4c6a3"}, - {file = "simplejson-3.17.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:23fe704da910ff45e72543cbba152821685a889cf00fc58d5c8ee96a9bad5f94"}, - {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f444762fed1bc1fd75187ef14a20ed900c1fbb245d45be9e834b822a0223bc81"}, - {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:681eb4d37c9a9a6eb9b3245a5e89d7f7b2b9895590bb08a20aa598c1eb0a1d9d"}, - {file = "simplejson-3.17.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:8e8607d8f6b4f9d46fee11447e334d6ab50e993dd4dbfb22f674616ce20907ab"}, - {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b10556817f09d46d420edd982dd0653940b90151d0576f09143a8e773459f6fe"}, - {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e1ec8a9ee0987d4524ffd6299e778c16cc35fef6d1a2764e609f90962f0b293a"}, - {file = "simplejson-3.17.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:0b4126cac7d69ac06ff22efd3e0b3328a4a70624fcd6bca4fc1b4e6d9e2e12bf"}, - {file = "simplejson-3.17.6-cp38-cp38-win32.whl", hash = "sha256:35a49ebef25f1ebdef54262e54ae80904d8692367a9f208cdfbc38dbf649e00a"}, - {file = "simplejson-3.17.6-cp38-cp38-win_amd64.whl", hash = "sha256:743cd768affaa508a21499f4858c5b824ffa2e1394ed94eb85caf47ac0732198"}, - {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:fb62d517a516128bacf08cb6a86ecd39fb06d08e7c4980251f5d5601d29989ba"}, - {file = "simplejson-3.17.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:12133863178a8080a3dccbf5cb2edfab0001bc41e5d6d2446af2a1131105adfe"}, - {file = "simplejson-3.17.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5540fba2d437edaf4aa4fbb80f43f42a8334206ad1ad3b27aef577fd989f20d9"}, - {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:d74ee72b5071818a1a5dab47338e87f08a738cb938a3b0653b9e4d959ddd1fd9"}, - {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:28221620f4dcabdeac310846629b976e599a13f59abb21616356a85231ebd6ad"}, - {file = "simplejson-3.17.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b09bc62e5193e31d7f9876220fb429ec13a6a181a24d897b9edfbbdbcd678851"}, - {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7255a37ff50593c9b2f1afa8fafd6ef5763213c1ed5a9e2c6f5b9cc925ab979f"}, - {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:401d40969cee3df7bda211e57b903a534561b77a7ade0dd622a8d1a31eaa8ba7"}, - {file = "simplejson-3.17.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a649d0f66029c7eb67042b15374bd93a26aae202591d9afd71e111dd0006b198"}, - {file = "simplejson-3.17.6-cp39-cp39-win32.whl", hash = "sha256:522fad7be85de57430d6d287c4b635813932946ebf41b913fe7e880d154ade2e"}, - {file = "simplejson-3.17.6-cp39-cp39-win_amd64.whl", hash = "sha256:3fe87570168b2ae018391e2b43fbf66e8593a86feccb4b0500d134c998983ccc"}, - {file = "simplejson-3.17.6.tar.gz", hash = "sha256:cf98038d2abf63a1ada5730e91e84c642ba6c225b0198c3684151b1f80c5f8a6"}, -] -six = [ - {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, - {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, -] -smmap = [ - {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, - {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, -] -sortedcontainers = [ - {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, - {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, -] -systemd-python = [ - {file = "systemd-python-234.tar.gz", hash = "sha256:fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7"}, -] -threadloop = [ - {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, - {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, -] -thrift = [ - {file = "thrift-0.15.0.tar.gz", hash = "sha256:87c8205a71cf8bbb111cb99b1f7495070fbc9cabb671669568854210da5b3e29"}, -] -tomli = [ - {file = "tomli-1.2.3-py3-none-any.whl", hash = "sha256:e3069e4be3ead9668e21cb9b074cd948f7b3113fd9c8bba083f48247aab8b11c"}, - {file = "tomli-1.2.3.tar.gz", hash = "sha256:05b6166bff487dc068d322585c7ea4ef78deed501cc124060e0f238e89a9231f"}, -] -tornado = [ - {file = "tornado-6.1-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:d371e811d6b156d82aa5f9a4e08b58debf97c302a35714f6f45e35139c332e32"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0d321a39c36e5f2c4ff12b4ed58d41390460f798422c4504e09eb5678e09998c"}, - {file = "tornado-6.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9de9e5188a782be6b1ce866e8a51bc76a0fbaa0e16613823fc38e4fc2556ad05"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:61b32d06ae8a036a6607805e6720ef00a3c98207038444ba7fd3d169cd998910"}, - {file = 
"tornado-6.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:3e63498f680547ed24d2c71e6497f24bca791aca2fe116dbc2bd0ac7f191691b"}, - {file = "tornado-6.1-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:6c77c9937962577a6a76917845d06af6ab9197702a42e1346d8ae2e76b5e3675"}, - {file = "tornado-6.1-cp35-cp35m-win32.whl", hash = "sha256:6286efab1ed6e74b7028327365cf7346b1d777d63ab30e21a0f4d5b275fc17d5"}, - {file = "tornado-6.1-cp35-cp35m-win_amd64.whl", hash = "sha256:fa2ba70284fa42c2a5ecb35e322e68823288a4251f9ba9cc77be04ae15eada68"}, - {file = "tornado-6.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:0a00ff4561e2929a2c37ce706cb8233b7907e0cdc22eab98888aca5dd3775feb"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:748290bf9112b581c525e6e6d3820621ff020ed95af6f17fedef416b27ed564c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:e385b637ac3acaae8022e7e47dfa7b83d3620e432e3ecb9a3f7f58f150e50921"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:25ad220258349a12ae87ede08a7b04aca51237721f63b1808d39bdb4b2164558"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:65d98939f1a2e74b58839f8c4dab3b6b3c1ce84972ae712be02845e65391ac7c"}, - {file = "tornado-6.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:e519d64089b0876c7b467274468709dadf11e41d65f63bba207e04217f47c085"}, - {file = "tornado-6.1-cp36-cp36m-win32.whl", hash = "sha256:b87936fd2c317b6ee08a5741ea06b9d11a6074ef4cc42e031bc6403f82a32575"}, - {file = "tornado-6.1-cp36-cp36m-win_amd64.whl", hash = "sha256:cc0ee35043162abbf717b7df924597ade8e5395e7b66d18270116f8745ceb795"}, - {file = "tornado-6.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7250a3fa399f08ec9cb3f7b1b987955d17e044f1ade821b32e5f435130250d7f"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:ed3ad863b1b40cd1d4bd21e7498329ccaece75db5a5bf58cd3c9f130843e7102"}, - {file = "tornado-6.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:dcef026f608f678c118779cd6591c8af6e9b4155c44e0d1bc0c87c036fb8c8c4"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:70dec29e8ac485dbf57481baee40781c63e381bebea080991893cd297742b8fd"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:d3f7594930c423fd9f5d1a76bee85a2c36fd8b4b16921cae7e965f22575e9c01"}, - {file = "tornado-6.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:3447475585bae2e77ecb832fc0300c3695516a47d46cefa0528181a34c5b9d3d"}, - {file = "tornado-6.1-cp37-cp37m-win32.whl", hash = "sha256:e7229e60ac41a1202444497ddde70a48d33909e484f96eb0da9baf8dc68541df"}, - {file = "tornado-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:cb5ec8eead331e3bb4ce8066cf06d2dfef1bfb1b2a73082dfe8a161301b76e37"}, - {file = "tornado-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:20241b3cb4f425e971cb0a8e4ffc9b0a861530ae3c52f2b0434e6c1b57e9fd95"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:c77da1263aa361938476f04c4b6c8916001b90b2c2fdd92d8d535e1af48fba5a"}, - {file = "tornado-6.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:fba85b6cd9c39be262fcd23865652920832b61583de2a2ca907dbd8e8a8c81e5"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:1e8225a1070cd8eec59a996c43229fe8f95689cb16e552d130b9793cb570a288"}, - {file = "tornado-6.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:d14d30e7f46a0476efb0deb5b61343b1526f73ebb5ed84f23dc794bdb88f9d9f"}, - {file = "tornado-6.1-cp38-cp38-manylinux2014_aarch64.whl", hash = 
"sha256:8f959b26f2634a091bb42241c3ed8d3cedb506e7c27b8dd5c7b9f745318ddbb6"}, - {file = "tornado-6.1-cp38-cp38-win32.whl", hash = "sha256:34ca2dac9e4d7afb0bed4677512e36a52f09caa6fded70b4e3e1c89dbd92c326"}, - {file = "tornado-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:6196a5c39286cc37c024cd78834fb9345e464525d8991c21e908cc046d1cc02c"}, - {file = "tornado-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0ba29bafd8e7e22920567ce0d232c26d4d47c8b5cf4ed7b562b5db39fa199c5"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:33892118b165401f291070100d6d09359ca74addda679b60390b09f8ef325ffe"}, - {file = "tornado-6.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:7da13da6f985aab7f6f28debab00c67ff9cbacd588e8477034c0652ac141feea"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:e0791ac58d91ac58f694d8d2957884df8e4e2f6687cdf367ef7eb7497f79eaa2"}, - {file = "tornado-6.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:66324e4e1beede9ac79e60f88de548da58b1f8ab4b2f1354d8375774f997e6c0"}, - {file = "tornado-6.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:a48900ecea1cbb71b8c71c620dee15b62f85f7c14189bdeee54966fbd9a0c5bd"}, - {file = "tornado-6.1-cp39-cp39-win32.whl", hash = "sha256:d3d20ea5782ba63ed13bc2b8c291a053c8d807a8fa927d941bd718468f7b950c"}, - {file = "tornado-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:548430be2740e327b3fe0201abe471f314741efcb0067ec4f2d7dcfb4825f3e4"}, - {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, -] -towncrier = [ - {file = "towncrier-22.12.0-py3-none-any.whl", hash = "sha256:9767a899a4d6856950f3598acd9e8f08da2663c49fdcda5ea0f9e6ba2afc8eea"}, - {file = "towncrier-22.12.0.tar.gz", hash = "sha256:9c49d7e75f646a9aea02ae904c0bc1639c8fd14a01292d2b123b8d307564034d"}, -] -treq = [ - {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"}, - {file = "treq-22.2.0.tar.gz", hash = "sha256:df757e3f141fc782ede076a604521194ffcb40fa2645cf48e5a37060307f52ec"}, -] -twine = [ - {file = "twine-4.0.2-py3-none-any.whl", hash = "sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8"}, - {file = "twine-4.0.2.tar.gz", hash = "sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8"}, -] -twisted = [ - {file = "Twisted-22.10.0-py3-none-any.whl", hash = "sha256:86c55f712cc5ab6f6d64e02503352464f0400f66d4f079096d744080afcccbd0"}, - {file = "Twisted-22.10.0.tar.gz", hash = "sha256:32acbd40a94f5f46e7b42c109bfae2b302250945561783a8b7a059048f2d4d31"}, -] -twisted-iocpsupport = [ - {file = "twisted-iocpsupport-1.0.2.tar.gz", hash = "sha256:72068b206ee809c9c596b57b5287259ea41ddb4774d86725b19f35bf56aa32a9"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win32.whl", hash = "sha256:985c06a33f5c0dae92c71a036d1ea63872ee86a21dd9b01e1f287486f15524b4"}, - {file = "twisted_iocpsupport-1.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:81b3abe3527b367da0220482820cb12a16c661672b7bcfcde328902890d63323"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win32.whl", hash = "sha256:9dbb8823b49f06d4de52721b47de4d3b3026064ef4788ce62b1a21c57c3fff6f"}, - {file = "twisted_iocpsupport-1.0.2-cp36-cp36m-win_amd64.whl", hash = "sha256:b9fed67cf0f951573f06d560ac2f10f2a4bbdc6697770113a2fc396ea2cb2565"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win32.whl", hash = "sha256:b76b4eed9b27fd63ddb0877efdd2d15835fdcb6baa745cb85b66e5d016ac2878"}, - {file = "twisted_iocpsupport-1.0.2-cp37-cp37m-win_amd64.whl", hash = 
"sha256:851b3735ca7e8102e661872390e3bce88f8901bece95c25a0c8bb9ecb8a23d32"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win32.whl", hash = "sha256:bf4133139d77fc706d8f572e6b7d82871d82ec7ef25d685c2351bdacfb701415"}, - {file = "twisted_iocpsupport-1.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:306becd6e22ab6e8e4f36b6bdafd9c92e867c98a5ce517b27fdd27760ee7ae41"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win32.whl", hash = "sha256:3c61742cb0bc6c1ac117a7e5f422c129832f0c295af49e01d8a6066df8cfc04d"}, - {file = "twisted_iocpsupport-1.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:b435857b9efcbfc12f8c326ef0383f26416272260455bbca2cd8d8eca470c546"}, - {file = "twisted_iocpsupport-1.0.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:7d972cfa8439bdcb35a7be78b7ef86d73b34b808c74be56dfa785c8a93b851bf"}, -] -txredisapi = [ - {file = "txredisapi-1.4.7-py3-none-any.whl", hash = "sha256:34c9eba8d34f452d30661f073b67b8cd42b695e3d31678ec1bbf628a65a0f059"}, - {file = "txredisapi-1.4.7.tar.gz", hash = "sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb"}, -] -typed-ast = [ - {file = "typed_ast-1.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:183b183b7771a508395d2cbffd6db67d6ad52958a5fdc99f450d954003900266"}, - {file = "typed_ast-1.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:676d051b1da67a852c0447621fdd11c4e104827417bf216092ec3e286f7da596"}, - {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc2542e83ac8399752bc16e0b35e038bdb659ba237f4222616b4e83fb9654985"}, - {file = "typed_ast-1.5.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:74cac86cc586db8dfda0ce65d8bcd2bf17b58668dfcc3652762f3ef0e6677e76"}, - {file = "typed_ast-1.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:18fe320f354d6f9ad3147859b6e16649a0781425268c4dde596093177660e71a"}, - {file = "typed_ast-1.5.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:31d8c6b2df19a777bc8826770b872a45a1f30cfefcfd729491baa5237faae837"}, - {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:963a0ccc9a4188524e6e6d39b12c9ca24cc2d45a71cfdd04a26d883c922b4b78"}, - {file = "typed_ast-1.5.2-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:0eb77764ea470f14fcbb89d51bc6bbf5e7623446ac4ed06cbd9ca9495b62e36e"}, - {file = "typed_ast-1.5.2-cp36-cp36m-win_amd64.whl", hash = "sha256:294a6903a4d087db805a7656989f613371915fc45c8cc0ddc5c5a0a8ad9bea4d"}, - {file = "typed_ast-1.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:26a432dc219c6b6f38be20a958cbe1abffcc5492821d7e27f08606ef99e0dffd"}, - {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7407cfcad702f0b6c0e0f3e7ab876cd1d2c13b14ce770e412c0c4b9728a0f88"}, - {file = "typed_ast-1.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f30ddd110634c2d7534b2d4e0e22967e88366b0d356b24de87419cc4410c41b7"}, - {file = "typed_ast-1.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:8c08d6625bb258179b6e512f55ad20f9dfef019bbfbe3095247401e053a3ea30"}, - {file = "typed_ast-1.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:90904d889ab8e81a956f2c0935a523cc4e077c7847a836abee832f868d5c26a4"}, - {file = "typed_ast-1.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:bbebc31bf11762b63bf61aaae232becb41c5bf6b3461b80a4df7e791fabb3aca"}, - {file = 
"typed_ast-1.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29dd9a3a9d259c9fa19d19738d021632d673f6ed9b35a739f48e5f807f264fb"}, - {file = "typed_ast-1.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:58ae097a325e9bb7a684572d20eb3e1809802c5c9ec7108e85da1eb6c1a3331b"}, - {file = "typed_ast-1.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:da0a98d458010bf4fe535f2d1e367a2e2060e105978873c04c04212fb20543f7"}, - {file = "typed_ast-1.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:33b4a19ddc9fc551ebabca9765d54d04600c4a50eda13893dadf67ed81d9a098"}, - {file = "typed_ast-1.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1098df9a0592dd4c8c0ccfc2e98931278a6c6c53cb3a3e2cf7e9ee3b06153344"}, - {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42c47c3b43fe3a39ddf8de1d40dbbfca60ac8530a36c9b198ea5b9efac75c09e"}, - {file = "typed_ast-1.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f290617f74a610849bd8f5514e34ae3d09eafd521dceaa6cf68b3f4414266d4e"}, - {file = "typed_ast-1.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:df05aa5b241e2e8045f5f4367a9f6187b09c4cdf8578bb219861c4e27c443db5"}, - {file = "typed_ast-1.5.2.tar.gz", hash = "sha256:525a2d4088e70a9f75b08b3f87a51acc9cde640e19cc523c7e41aa355564ae27"}, -] -types-bleach = [ - {file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"}, - {file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"}, -] -types-commonmark = [ - {file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"}, - {file = "types_commonmark-0.9.2-py3-none-any.whl", hash = "sha256:56f20199a1f9a2924443211a0ef97f8b15a8a956a7f4e9186be6950bf38d6d02"}, -] -types-cryptography = [ - {file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"}, - {file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"}, -] -types-docutils = [ - {file = "types-docutils-0.19.1.1.tar.gz", hash = "sha256:be0a51ba1c7dd215d9d2df66d6845e63c1009b4bbf4c5beb87a0d9745cdba962"}, - {file = "types_docutils-0.19.1.1-py3-none-any.whl", hash = "sha256:a024cada35f0c13cc45eb0b68a102719018a634013690b7fef723bcbfadbd1f1"}, -] -types-enum34 = [ - {file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"}, - {file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"}, -] -types-ipaddress = [ - {file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"}, - {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, -] -types-jsonschema = [ - {file = "types-jsonschema-4.17.0.2.tar.gz", hash = "sha256:8b9e1140d4d780f0f19b5cab1b8a3732e8dd5e49dbc1f174cc0b499125ca6f6c"}, - {file = "types_jsonschema-4.17.0.2-py3-none-any.whl", hash = "sha256:8fd2f9aea4da54f9a811baa6963aac10fd680c18baa6237392c079b97d152738"}, -] -types-opentracing = [ - {file = "types-opentracing-2.4.10.tar.gz", hash = 
"sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"}, - {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"}, -] -types-pillow = [ - {file = "types-Pillow-9.4.0.0.tar.gz", hash = "sha256:ef8a823638ceb765a144a98a2f816b8912da0337c5c2556d33774f1434f9918c"}, - {file = "types_Pillow-9.4.0.0-py3-none-any.whl", hash = "sha256:246f0dc52d575ef64e01f06f41be37a492b542ee3180638a7b874a6dd4d48c01"}, -] -types-psycopg2 = [ - {file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"}, - {file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"}, -] -types-pyopenssl = [ - {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"}, - {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"}, -] -types-pyyaml = [ - {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"}, - {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"}, -] -types-requests = [ - {file = "types-requests-2.28.11.7.tar.gz", hash = "sha256:0ae38633734990d019b80f5463dfa164ebd3581998ac8435f526da6fe4d598c3"}, - {file = "types_requests-2.28.11.7-py3-none-any.whl", hash = "sha256:b6a2fca8109f4fdba33052f11ed86102bddb2338519e1827387137fefc66a98b"}, -] -types-setuptools = [ - {file = "types-setuptools-65.6.0.3.tar.gz", hash = "sha256:7ddd7415282fa97ab18e490206067c0cdb126b103743e72ee86783d7af6481c5"}, - {file = "types_setuptools-65.6.0.3-py3-none-any.whl", hash = "sha256:ad729fc3a9a3946f73915eaab16ce56b30ed5ae998479253d809d76b3889ee09"}, -] -types-urllib3 = [ - {file = "types-urllib3-1.26.10.tar.gz", hash = "sha256:a26898f530e6c3f43f25b907f2b884486868ffd56a9faa94cbf9b3eb6e165d6a"}, - {file = "types_urllib3-1.26.10-py3-none-any.whl", hash = "sha256:d755278d5ecd7a7a6479a190e54230f241f1a99c19b81518b756b19dc69e518c"}, -] -typing-extensions = [ - {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, - {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, -] -unpaddedbase64 = [ - {file = "unpaddedbase64-2.1.0-py3-none-any.whl", hash = "sha256:485eff129c30175d2cd6f0cd8d2310dff51e666f7f36175f738d75dfdbd0b1c6"}, - {file = "unpaddedbase64-2.1.0.tar.gz", hash = "sha256:7273c60c089de39d90f5d6d4a7883a79e319dc9d9b1c8924a7fab96178a5f005"}, -] -urllib3 = [ - {file = "urllib3-1.26.12-py2.py3-none-any.whl", hash = "sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997"}, - {file = "urllib3-1.26.12.tar.gz", hash = "sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e"}, -] -webencodings = [ - {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, - {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, -] -wrapt = [ - {file = "wrapt-1.14.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:1b376b3f4896e7930f1f772ac4b064ac12598d1c38d04907e696cc4d794b43d3"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_i686.whl", hash = 
"sha256:903500616422a40a98a5a3c4ff4ed9d0066f3b4c951fa286018ecdf0750194ef"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5a9a0d155deafd9448baff28c08e150d9b24ff010e899311ddd63c45c2445e28"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ddaea91abf8b0d13443f6dac52e89051a5063c7d014710dcb4d4abb2ff811a59"}, - {file = "wrapt-1.14.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:36f582d0c6bc99d5f39cd3ac2a9062e57f3cf606ade29a0a0d6b323462f4dd87"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:7ef58fb89674095bfc57c4069e95d7a31cfdc0939e2a579882ac7d55aadfd2a1"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:e2f83e18fe2f4c9e7db597e988f72712c0c3676d337d8b101f6758107c42425b"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ee2b1b1769f6707a8a445162ea16dddf74285c3964f605877a20e38545c3c462"}, - {file = "wrapt-1.14.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:833b58d5d0b7e5b9832869f039203389ac7cbf01765639c7309fd50ef619e0b1"}, - {file = "wrapt-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80bb5c256f1415f747011dc3604b59bc1f91c6e7150bd7db03b19170ee06b320"}, - {file = "wrapt-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:07f7a7d0f388028b2df1d916e94bbb40624c59b48ecc6cbc232546706fac74c2"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02b41b633c6261feff8ddd8d11c711df6842aba629fdd3da10249a53211a72c4"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fe803deacd09a233e4762a1adcea5db5d31e6be577a43352936179d14d90069"}, - {file = "wrapt-1.14.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:257fd78c513e0fb5cdbe058c27a0624c9884e735bbd131935fd49e9fe719d310"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4fcc4649dc762cddacd193e6b55bc02edca674067f5f98166d7713b193932b7f"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:11871514607b15cfeb87c547a49bca19fde402f32e2b1c24a632506c0a756656"}, - {file = "wrapt-1.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8ad85f7f4e20964db4daadcab70b47ab05c7c1cf2a7c1e51087bfaa83831854c"}, - {file = "wrapt-1.14.1-cp310-cp310-win32.whl", hash = "sha256:a9a52172be0b5aae932bef82a79ec0a0ce87288c7d132946d645eba03f0ad8a8"}, - {file = "wrapt-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:6d323e1554b3d22cfc03cd3243b5bb815a51f5249fdcbb86fda4bf62bab9e164"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:43ca3bbbe97af00f49efb06e352eae40434ca9d915906f77def219b88e85d907"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:6b1a564e6cb69922c7fe3a678b9f9a3c54e72b469875aa8018f18b4d1dd1adf3"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:00b6d4ea20a906c0ca56d84f93065b398ab74b927a7a3dbd470f6fc503f95dc3"}, - {file = "wrapt-1.14.1-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:a85d2b46be66a71bedde836d9e41859879cc54a2a04fad1191eb50c2066f6e9d"}, - {file = "wrapt-1.14.1-cp35-cp35m-win32.whl", hash = "sha256:dbcda74c67263139358f4d188ae5faae95c30929281bc6866d00573783c422b7"}, - {file = "wrapt-1.14.1-cp35-cp35m-win_amd64.whl", hash = "sha256:b21bb4c09ffabfa0e85e3a6b623e19b80e7acd709b9f91452b8297ace2a8ab00"}, - {file = "wrapt-1.14.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = 
"sha256:9e0fd32e0148dd5dea6af5fee42beb949098564cc23211a88d799e434255a1f4"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9736af4641846491aedb3c3f56b9bc5568d92b0692303b5a305301a95dfd38b1"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5b02d65b9ccf0ef6c34cba6cf5bf2aab1bb2f49c6090bafeecc9cd81ad4ea1c1"}, - {file = "wrapt-1.14.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21ac0156c4b089b330b7666db40feee30a5d52634cc4560e1905d6529a3897ff"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:9f3e6f9e05148ff90002b884fbc2a86bd303ae847e472f44ecc06c2cd2fcdb2d"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:6e743de5e9c3d1b7185870f480587b75b1cb604832e380d64f9504a0535912d1"}, - {file = "wrapt-1.14.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:d79d7d5dc8a32b7093e81e97dad755127ff77bcc899e845f41bf71747af0c569"}, - {file = "wrapt-1.14.1-cp36-cp36m-win32.whl", hash = "sha256:81b19725065dcb43df02b37e03278c011a09e49757287dca60c5aecdd5a0b8ed"}, - {file = "wrapt-1.14.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b014c23646a467558be7da3d6b9fa409b2c567d2110599b7cf9a0c5992b3b471"}, - {file = "wrapt-1.14.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:88bd7b6bd70a5b6803c1abf6bca012f7ed963e58c68d76ee20b9d751c74a3248"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5901a312f4d14c59918c221323068fad0540e34324925c8475263841dbdfe68"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d77c85fedff92cf788face9bfa3ebaa364448ebb1d765302e9af11bf449ca36d"}, - {file = "wrapt-1.14.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d649d616e5c6a678b26d15ece345354f7c2286acd6db868e65fcc5ff7c24a77"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:7d2872609603cb35ca513d7404a94d6d608fc13211563571117046c9d2bcc3d7"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:ee6acae74a2b91865910eef5e7de37dc6895ad96fa23603d1d27ea69df545015"}, - {file = "wrapt-1.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b39d38039a1fdad98c87279b48bc5dce2c0ca0d73483b12cb72aa9609278e8a"}, - {file = "wrapt-1.14.1-cp37-cp37m-win32.whl", hash = "sha256:60db23fa423575eeb65ea430cee741acb7c26a1365d103f7b0f6ec412b893853"}, - {file = "wrapt-1.14.1-cp37-cp37m-win_amd64.whl", hash = "sha256:709fe01086a55cf79d20f741f39325018f4df051ef39fe921b1ebe780a66184c"}, - {file = "wrapt-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8c0ce1e99116d5ab21355d8ebe53d9460366704ea38ae4d9f6933188f327b456"}, - {file = "wrapt-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e3fb1677c720409d5f671e39bac6c9e0e422584e5f518bfd50aa4cbbea02433f"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:642c2e7a804fcf18c222e1060df25fc210b9c58db7c91416fb055897fc27e8cc"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b7c050ae976e286906dd3f26009e117eb000fb2cf3533398c5ad9ccc86867b1"}, - {file = "wrapt-1.14.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ef3f72c9666bba2bab70d2a8b79f2c6d2c1a42a7f7e2b0ec83bb2f9e383950af"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:01c205616a89d09827986bc4e859bcabd64f5a0662a7fe95e0d359424e0e071b"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5a0f54ce2c092aaf439813735584b9537cad479575a09892b8352fea5e988dc0"}, - {file = "wrapt-1.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2cf71233a0ed05ccdabe209c606fe0bac7379fdcf687f39b944420d2a09fdb57"}, - {file = "wrapt-1.14.1-cp38-cp38-win32.whl", hash = "sha256:aa31fdcc33fef9eb2552cbcbfee7773d5a6792c137b359e82879c101e98584c5"}, - {file = "wrapt-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1967f46ea8f2db647c786e78d8cc7e4313dbd1b0aca360592d8027b8508e24d"}, - {file = "wrapt-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3232822c7d98d23895ccc443bbdf57c7412c5a65996c30442ebe6ed3df335383"}, - {file = "wrapt-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:988635d122aaf2bdcef9e795435662bcd65b02f4f4c1ae37fbee7401c440b3a7"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cca3c2cdadb362116235fdbd411735de4328c61425b0aa9f872fd76d02c4e86"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d52a25136894c63de15a35bc0bdc5adb4b0e173b9c0d07a2be9d3ca64a332735"}, - {file = "wrapt-1.14.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40e7bc81c9e2b2734ea4bc1aceb8a8f0ceaac7c5299bc5d69e37c44d9081d43b"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b9b7a708dd92306328117d8c4b62e2194d00c365f18eff11a9b53c6f923b01e3"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6a9a25751acb379b466ff6be78a315e2b439d4c94c1e99cb7266d40a537995d3"}, - {file = "wrapt-1.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:34aa51c45f28ba7f12accd624225e2b1e5a3a45206aa191f6f9aac931d9d56fe"}, - {file = "wrapt-1.14.1-cp39-cp39-win32.whl", hash = "sha256:dee0ce50c6a2dd9056c20db781e9c1cfd33e77d2d569f5d1d9321c641bb903d5"}, - {file = "wrapt-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:dee60e1de1898bde3b238f18340eec6148986da0455d8ba7848d50470a7a32fb"}, - {file = "wrapt-1.14.1.tar.gz", hash = "sha256:380a85cf89e0e69b7cfbe2ea9f765f004ff419f34194018a6827ac0e3edfed4d"}, -] -xmlschema = [ - {file = "xmlschema-1.10.0-py3-none-any.whl", hash = "sha256:dbd68bded2fef00c19cf37110ca0565eca34cf0b6c9e1d3b62ad0de8cbb582ca"}, - {file = "xmlschema-1.10.0.tar.gz", hash = "sha256:be1eedce6a4b911fd3a7f4060d0811951820a13410e61f0454b30e9f4e7cf197"}, -] -zipp = [ - {file = "zipp-3.7.0-py3-none-any.whl", hash = "sha256:b47250dd24f92b7dd6a0a8fc5244da14608f3ca90a5efcd37a3b1642fac9a375"}, - {file = "zipp-3.7.0.tar.gz", hash = "sha256:9f50f446828eb9d45b267433fd3e9da8d801f614129124863f9c51ebceafb87d"}, -] -"zope.event" = [ - {file = "zope.event-4.5.0-py2.py3-none-any.whl", hash = "sha256:2666401939cdaa5f4e0c08cf7f20c9b21423b95e88f4675b1443973bdb080c42"}, - {file = "zope.event-4.5.0.tar.gz", hash = "sha256:5e76517f5b9b119acf37ca8819781db6c16ea433f7e2062c4afc2b6fbedb1330"}, -] -"zope.interface" = [ - {file = "zope.interface-5.4.0-cp27-cp27m-macosx_10_14_x86_64.whl", hash = "sha256:7df1e1c05304f26faa49fa752a8c690126cf98b40b91d54e6e9cc3b7d6ffe8b7"}, - {file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:2c98384b254b37ce50eddd55db8d381a5c53b4c10ee66e1e7fe749824f894021"}, - 
{file = "zope.interface-5.4.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:08f9636e99a9d5410181ba0729e0408d3d8748026ea938f3b970a0249daa8192"}, - {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:0ea1d73b7c9dcbc5080bb8aaffb776f1c68e807767069b9ccdd06f27a161914a"}, - {file = "zope.interface-5.4.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:273f158fabc5ea33cbc936da0ab3d4ba80ede5351babc4f577d768e057651531"}, - {file = "zope.interface-5.4.0-cp27-cp27m-win32.whl", hash = "sha256:a1e6e96217a0f72e2b8629e271e1b280c6fa3fe6e59fa8f6701bec14e3354325"}, - {file = "zope.interface-5.4.0-cp27-cp27m-win_amd64.whl", hash = "sha256:877473e675fdcc113c138813a5dd440da0769a2d81f4d86614e5d62b69497155"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f7ee479e96f7ee350db1cf24afa5685a5899e2b34992fb99e1f7c1b0b758d263"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:b0297b1e05fd128d26cc2460c810d42e205d16d76799526dfa8c8ccd50e74959"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:af310ec8335016b5e52cae60cda4a4f2a60a788cbb949a4fbea13d441aa5a09e"}, - {file = "zope.interface-5.4.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:9a9845c4c6bb56e508651f005c4aeb0404e518c6f000d5a1123ab077ab769f5c"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:0b465ae0962d49c68aa9733ba92a001b2a0933c317780435f00be7ecb959c702"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:5dd9ca406499444f4c8299f803d4a14edf7890ecc595c8b1c7115c2342cadc5f"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:469e2407e0fe9880ac690a3666f03eb4c3c444411a5a5fddfdabc5d184a79f05"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:52de7fc6c21b419078008f697fd4103dbc763288b1406b4562554bd47514c004"}, - {file = "zope.interface-5.4.0-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:3dd4952748521205697bc2802e4afac5ed4b02909bb799ba1fe239f77fd4e117"}, - {file = "zope.interface-5.4.0-cp35-cp35m-win32.whl", hash = "sha256:dd93ea5c0c7f3e25335ab7d22a507b1dc43976e1345508f845efc573d3d779d8"}, - {file = "zope.interface-5.4.0-cp35-cp35m-win_amd64.whl", hash = "sha256:3748fac0d0f6a304e674955ab1365d515993b3a0a865e16a11ec9d86fb307f63"}, - {file = "zope.interface-5.4.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:66c0061c91b3b9cf542131148ef7ecbecb2690d48d1612ec386de9d36766058f"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:d0c1bc2fa9a7285719e5678584f6b92572a5b639d0e471bb8d4b650a1a910920"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2876246527c91e101184f63ccd1d716ec9c46519cc5f3d5375a3351c46467c46"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:334701327f37c47fa628fc8b8d28c7d7730ce7daaf4bda1efb741679c2b087fc"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:71aace0c42d53abe6fc7f726c5d3b60d90f3c5c055a447950ad6ea9cec2e37d9"}, - {file = "zope.interface-5.4.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5bb3489b4558e49ad2c5118137cfeaf59434f9737fa9c5deefc72d22c23822e2"}, - {file = "zope.interface-5.4.0-cp36-cp36m-win32.whl", hash = "sha256:1c0e316c9add0db48a5b703833881351444398b04111188069a26a61cfb4df78"}, - {file = "zope.interface-5.4.0-cp36-cp36m-win_amd64.whl", hash = 
"sha256:6f0c02cbb9691b7c91d5009108f975f8ffeab5dff8f26d62e21c493060eff2a1"}, - {file = "zope.interface-5.4.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:7d97a4306898b05404a0dcdc32d9709b7d8832c0c542b861d9a826301719794e"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:867a5ad16892bf20e6c4ea2aab1971f45645ff3102ad29bd84c86027fa99997b"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:5f931a1c21dfa7a9c573ec1f50a31135ccce84e32507c54e1ea404894c5eb96f"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:194d0bcb1374ac3e1e023961610dc8f2c78a0f5f634d0c737691e215569e640d"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:8270252effc60b9642b423189a2fe90eb6b59e87cbee54549db3f5562ff8d1b8"}, - {file = "zope.interface-5.4.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:15e7d1f7a6ee16572e21e3576d2012b2778cbacf75eb4b7400be37455f5ca8bf"}, - {file = "zope.interface-5.4.0-cp37-cp37m-win32.whl", hash = "sha256:8892f89999ffd992208754851e5a052f6b5db70a1e3f7d54b17c5211e37a98c7"}, - {file = "zope.interface-5.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2e5a26f16503be6c826abca904e45f1a44ff275fdb7e9d1b75c10671c26f8b94"}, - {file = "zope.interface-5.4.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:0f91b5b948686659a8e28b728ff5e74b1be6bf40cb04704453617e5f1e945ef3"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:4de4bc9b6d35c5af65b454d3e9bc98c50eb3960d5a3762c9438df57427134b8e"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:bf68f4b2b6683e52bec69273562df15af352e5ed25d1b6641e7efddc5951d1a7"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:63b82bb63de7c821428d513607e84c6d97d58afd1fe2eb645030bdc185440120"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:db1fa631737dab9fa0b37f3979d8d2631e348c3b4e8325d6873c2541d0ae5a48"}, - {file = "zope.interface-5.4.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:f44e517131a98f7a76696a7b21b164bcb85291cee106a23beccce454e1f433a4"}, - {file = "zope.interface-5.4.0-cp38-cp38-win32.whl", hash = "sha256:a9506a7e80bcf6eacfff7f804c0ad5350c8c95b9010e4356a4b36f5322f09abb"}, - {file = "zope.interface-5.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:3c02411a3b62668200910090a0dff17c0b25aaa36145082a5a6adf08fa281e54"}, - {file = "zope.interface-5.4.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:0cee5187b60ed26d56eb2960136288ce91bcf61e2a9405660d271d1f122a69a4"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:a8156e6a7f5e2a0ff0c5b21d6bcb45145efece1909efcbbbf48c56f8da68221d"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:205e40ccde0f37496904572035deea747390a8b7dc65146d30b96e2dd1359a83"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:3f24df7124c323fceb53ff6168da70dbfbae1442b4f3da439cd441681f54fe25"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:5208ebd5152e040640518a77827bdfcc73773a15a33d6644015b763b9c9febc1"}, - {file = "zope.interface-5.4.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:17776ecd3a1fdd2b2cd5373e5ef8b307162f581c693575ec62e7c5399d80794c"}, - {file = "zope.interface-5.4.0-cp39-cp39-win32.whl", hash = "sha256:d4d9d6c1a455d4babd320203b918ccc7fcbefe308615c521062bc2ba1aa4d26e"}, - {file = "zope.interface-5.4.0-cp39-cp39-win_amd64.whl", hash = 
"sha256:0cba8477e300d64a11a9789ed40ee8932b59f9ee05f85276dbb4b59acee5dd09"}, - {file = "zope.interface-5.4.0.tar.gz", hash = "sha256:5dba5f530fec3f0988d83b78cc591b58c0b6eb8431a85edd1569a0539a8a5a0e"}, -] -"zope.schema" = [ - {file = "zope.schema-6.2.0-py2.py3-none-any.whl", hash = "sha256:03150d8670549590b45109e06b7b964f4e751fa9cb5297ec4985c3bc38641b07"}, - {file = "zope.schema-6.2.0.tar.gz", hash = "sha256:2201aef8ad75ee5a881284d7a6acd384661d6dca7bde5e80a22839a77124595b"}, -] From 4389b8518f943079744783d5e0d18da48e6256d7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 18:28:04 +0000 Subject: [PATCH 032/344] Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2 (#14861) * Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2 Bumps [peaceiris/actions-gh-pages](https://github.com/peaceiris/actions-gh-pages) from 3.9.1 to 3.9.2. - [Release notes](https://github.com/peaceiris/actions-gh-pages/releases) - [Changelog](https://github.com/peaceiris/actions-gh-pages/blob/main/CHANGELOG.md) - [Commits](https://github.com/peaceiris/actions-gh-pages/compare/64b46b4226a4a12da2239ba3ea5aa73e3163c75b...bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7) --- updated-dependencies: - dependency-name: peaceiris/actions-gh-pages dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs.yaml | 2 +- changelog.d/14861.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14861.misc diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 0b33058337b9..55b4b287f677 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -58,7 +58,7 @@ jobs: # Deploy to the target directory. - name: Deploy to gh pages - uses: peaceiris/actions-gh-pages@64b46b4226a4a12da2239ba3ea5aa73e3163c75b # v3.9.1 + uses: peaceiris/actions-gh-pages@bd8c6b06eba6b3d25d72b7a1767993c0aeee42e7 # v3.9.2 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: ./book diff --git a/changelog.d/14861.misc b/changelog.d/14861.misc new file mode 100644 index 000000000000..44813061140d --- /dev/null +++ b/changelog.d/14861.misc @@ -0,0 +1 @@ +Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. From a34682f7d6509dfc344d6f4ce37fc2eea0102401 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 18:39:50 +0000 Subject: [PATCH 033/344] Bump types-pillow from 9.4.0.0 to 9.4.0.3 (#14863) * Bump types-pillow from 9.4.0.0 to 9.4.0.3 Bumps [types-pillow](https://github.com/python/typeshed) from 9.4.0.0 to 9.4.0.3. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pillow dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14863.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14863.misc diff --git a/changelog.d/14863.misc b/changelog.d/14863.misc new file mode 100644 index 000000000000..9dc092256c9d --- /dev/null +++ b/changelog.d/14863.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.4.0.0 to 9.4.0.3. diff --git a/poetry.lock b/poetry.lock index dffe1e0f7635..1d49588ab35a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1643,7 +1643,7 @@ files = [ cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] @@ -2581,14 +2581,14 @@ files = [ [[package]] name = "types-pillow" -version = "9.4.0.0" +version = "9.4.0.3" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.4.0.0.tar.gz", hash = "sha256:ef8a823638ceb765a144a98a2f816b8912da0337c5c2556d33774f1434f9918c"}, - {file = "types_Pillow-9.4.0.0-py3-none-any.whl", hash = "sha256:246f0dc52d575ef64e01f06f41be37a492b542ee3180638a7b874a6dd4d48c01"}, + {file = "types-Pillow-9.4.0.3.tar.gz", hash = "sha256:eba8ff24457a1b8669b6099793f3d313d034d407ee9f6e5fdf12c86cf54914cd"}, + {file = "types_Pillow-9.4.0.3-py3-none-any.whl", hash = "sha256:f8f16a54ed315144296864df11f14beca82ec0990ea83710b7eac7eb1bb38971"}, ] [[package]] From 3a777e7dc2de4b36089e3837a836a843e8e4d992 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 18:47:47 +0000 Subject: [PATCH 034/344] Bump ruff from 0.0.215 to 0.0.224 (#14862) * Bump ruff from 0.0.215 to 0.0.224 Bumps [ruff](https://github.com/charliermarsh/ruff) from 0.0.215 to 0.0.224. - [Release notes](https://github.com/charliermarsh/ruff/releases) - [Changelog](https://github.com/charliermarsh/ruff/blob/main/BREAKING_CHANGES.md) - [Commits](https://github.com/charliermarsh/ruff/compare/v0.0.215...v0.0.224) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14862.misc | 1 + poetry.lock | 36 ++++++++++++++++++------------------ pyproject.toml | 2 +- 3 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 changelog.d/14862.misc diff --git a/changelog.d/14862.misc b/changelog.d/14862.misc new file mode 100644 index 000000000000..b18147669b30 --- /dev/null +++ b/changelog.d/14862.misc @@ -0,0 +1 @@ +Bump ruff from 0.0.215 to 0.0.224. diff --git a/poetry.lock b/poetry.lock index 1d49588ab35a..3838d3b34bf9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1906,28 +1906,28 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] [[package]] name = "ruff" -version = "0.0.215" +version = "0.0.224" description = "An extremely fast Python linter, written in Rust." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.215-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:a4963613bca6ffc448deca1ce3a3fc69af216d6234e5d7f256935d7407088724"}, - {file = "ruff-0.0.215-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:7bcd7b07a88c6530bb4e80850d6cf261081b9d4147eb0ea91fbb85a332ba4fe6"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf3fcbf717a1e0c480b3d1fe9fd823043af463f067ec896746dab2123c4dcf10"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8199acc4a20d2b3761c4489171f45f37654f2d5ce096361221ea392f078b4be0"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f68624344209d07000aba115eeac551f362e278970112f0b69838c70f77f7df"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:f3c846df8a83445c394e6be58b8e784ec8fc82d67de94f137026c43e6037958b"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65b13019821af35a3225a64f2c93877c1e8059b92bb13fce32281ceefeecd199"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a02cb67a7171418c5a90ad0d8f983b5fd29b321c9861e0164d126cda4869c61"}, - {file = "ruff-0.0.215-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d69e654d977842c327f26487ef9b7dba39204b113619d33b4139bd3fdd101c"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ed2a0e13c822f8f0c40e6fe6172ff9c88add55a1dac9e0c05315618f82375648"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:1fd4b80bf34e20d18b01bf6d981973975184a85ed39f64934e11d00e2aba882f"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_i686.whl", hash = "sha256:5360b476b8720fa76d9dd6ee980c563b930a08524c91c99edddb25364ef656d7"}, - {file = "ruff-0.0.215-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:352534c0c2ffd491b331fe5982b1d3e88a6c2083a3c127466d4eed63918f6ea8"}, - {file = "ruff-0.0.215-py3-none-win32.whl", hash = "sha256:af3cd199a0c6f5b90b9c84a2b9b74b202901194b8b00d5d3e28a0a814037b73f"}, - {file = "ruff-0.0.215-py3-none-win_amd64.whl", hash = "sha256:aa6fe5b56b17a04c8db7f60fef21a9ff96109d10d9232b436ae2dfdc6cc70b7c"}, - {file = "ruff-0.0.215.tar.gz", hash = "sha256:a82ab1452396d5ca389bdcb182e8f273c5f7db854022d7a303764b6218e9e77e"}, + {file = "ruff-0.0.224-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:015277c45716733e99a19267fd244870bff2b619d4814065fe5cded5ab139b92"}, + {file = "ruff-0.0.224-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ca8211b316fa2df70d90e38819862d58e4aec87643c2c343ba5102a7ff5d3221"}, + {file = "ruff-0.0.224-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:637a502a37da3aac9832a0dd21255376b0320cf66e3d420d11000e09deb3e682"}, + {file = "ruff-0.0.224-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a85fe53b8193c3e9f7ca9bef7dfd3bcd079a86542a14b66f352ce0316de5c457"}, + {file = "ruff-0.0.224-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:367c74a9ff9da165df7dc1e5e1fae5c4cda05cb94202bc9a6e5836243bc625c1"}, + {file = "ruff-0.0.224-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2d230fc497abdeb3b54d2808ba59802962b50e0611b558337d4c07e6d490ed6c"}, + {file = "ruff-0.0.224-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:3df2bcb525fec6c2d551f7ab9843e42e8e4fa33586479953445ef64cbe6d6352"}, + {file = "ruff-0.0.224-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:30e494a23c2f23a07adbd4a9df526057a8cdb4c88536bbc513b5a73385c3d5a7"}, + {file = "ruff-0.0.224-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5836f89f1388b7bb718a5c77cdd13e356737573d29581a18d7f575e42124c"}, + {file = "ruff-0.0.224-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d791073cfd40c8e697d4c79faa67e2ad54dc960854bfa0c0cba61ef4bb02d3b1"}, + {file = "ruff-0.0.224-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ef72a081dfe24bfb8aa0568e0f1ee0174fffbc6ebb0ae2b8b4cd3f9457cc867b"}, + {file = "ruff-0.0.224-py3-none-musllinux_1_2_i686.whl", hash = "sha256:666ecfcf0019f5b0e72e0eba7a7330760b680ba0fb6413b139a594b117e612db"}, + {file = "ruff-0.0.224-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:815d5d448e2bf5107340d6f47cbddac74186cb365c56bdcc2315fbcf59ebc466"}, + {file = "ruff-0.0.224-py3-none-win32.whl", hash = "sha256:17d98c1f03e98c15d3f2c49e0ffdedc57b221217c4e3d7b6f732893101083240"}, + {file = "ruff-0.0.224-py3-none-win_amd64.whl", hash = "sha256:3db4fe992cea69405061e09974c3955b750611b1e76161471c27cd2e8ccffa05"}, + {file = "ruff-0.0.224.tar.gz", hash = "sha256:3b07c2e8da29605a8577b1aef90f8ca0c34a66683b77b06007f1970bc0689003"}, ] [[package]] @@ -2963,4 +2963,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "53867af07a507c3addd614c828dfb26175f6604398848e84c0ea65980f8a59a2" +content-hash = "38867861f77c6faca817487efd02fbb7271b424d56744ad9ad248cd1dd297566" diff --git a/pyproject.toml b/pyproject.toml index 15dc010fd823..d54dde4a2ffe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -309,7 +309,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.215" +ruff = "0.0.224" # Typechecking mypy = "*" From f1135a793034cce5ad7dbceaad76dc68b5061e60 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 19:01:29 +0000 Subject: [PATCH 035/344] Bump sentry-sdk from 1.12.1 to 1.13.0 (#14852) Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.12.1 to 1.13.0. - [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.12.1...1.13.0) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 3838d3b34bf9..46c83da7b595 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1964,14 +1964,14 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.12.1" +version = "1.13.0" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.12.1.tar.gz", hash = "sha256:5bbe4b72de22f9ac1e67f2a4e6efe8fbd595bb59b7b223443f50fe5802a5551c"}, - {file = "sentry_sdk-1.12.1-py2.py3-none-any.whl", hash = "sha256:9f0b960694e2d8bb04db4ba6ac2a645040caef4e762c65937998ff06064f10d6"}, + {file = "sentry-sdk-1.13.0.tar.gz", hash = "sha256:72da0766c3069a3941eadbdfa0996f83f5a33e55902a19ba399557cfee1dddcc"}, + {file = "sentry_sdk-1.13.0-py2.py3-none-any.whl", hash = "sha256:b7ff6318183e551145b5c4766eb65b59ad5b63ff234dffddc5fb50340cad6729"}, ] [package.dependencies] @@ -1998,6 +1998,7 @@ rq = ["rq (>=0.6)"] sanic = ["sanic (>=0.8)"] sqlalchemy = ["sqlalchemy (>=1.2)"] starlette = ["starlette (>=0.19.1)"] +starlite = ["starlite (>=1.48)"] tornado = ["tornado (>=5)"] [[package]] From 87e5f4599a9c58486e49ee13a54fb1f0a51c79d9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 19:09:15 +0000 Subject: [PATCH 036/344] Bump phonenumbers from 8.13.2 to 8.13.4 (#14849) Bumps [phonenumbers](https://github.com/daviddrysdale/python-phonenumbers) from 8.13.2 to 8.13.4. - [Release notes](https://github.com/daviddrysdale/python-phonenumbers/releases) - [Commits](https://github.com/daviddrysdale/python-phonenumbers/compare/v8.13.2...v8.13.4) --- updated-dependencies: - dependency-name: phonenumbers dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 46c83da7b595..681276ffc277 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1226,14 +1226,14 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.2" +version = "8.13.4" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." category = "main" optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.2-py2.py3-none-any.whl", hash = "sha256:884b26f775205261f4dc861371dce217c1661a4942fb3ec3624e290fb51869bf"}, - {file = "phonenumbers-8.13.2.tar.gz", hash = "sha256:0179f688d48c0e7e161eb7b9d86d587940af1f5174f97c1fdfd893c599c0d94a"}, + {file = "phonenumbers-8.13.4-py2.py3-none-any.whl", hash = "sha256:a577a46c069ad889c7b7cf4dd978751d059edeab28b97acead4775d2ea1fc70a"}, + {file = "phonenumbers-8.13.4.tar.gz", hash = "sha256:6d63455012fc9431105ffc7739befca61c3efc551b287dca58d2be2e745475a9"}, ] [[package]] From e1b2c7095d2107b1f466f1688e3a2040e23c7e9c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 19:16:43 +0000 Subject: [PATCH 037/344] Bump packaging from 22.0 to 23.0 (#14847) Bumps [packaging](https://github.com/pypa/packaging) from 22.0 to 23.0. 
- [Release notes](https://github.com/pypa/packaging/releases) - [Changelog](https://github.com/pypa/packaging/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pypa/packaging/compare/22.0...23.0) --- updated-dependencies: - dependency-name: packaging dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 681276ffc277..178e3787f7d4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1187,14 +1187,14 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte [[package]] name = "packaging" -version = "22.0" +version = "23.0" description = "Core utilities for Python packages" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"}, - {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"}, + {file = "packaging-23.0-py3-none-any.whl", hash = "sha256:714ac14496c3e68c99c29b00845f7a2b85f3bb6f1078fd9f72fd20f0570002b2"}, + {file = "packaging-23.0.tar.gz", hash = "sha256:b6ad297f8907de0fa2fe1ccbd26fdaf387f5f47c7275fedf8cce89f99446cf97"}, ] [[package]] From 4d6b1d3c47387466d34abb98613ca0d240057e24 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 18 Jan 2023 09:27:57 -0500 Subject: [PATCH 038/344] Properly check for frozendicts in event auth code. (#14864) Check for for an instance of a mapping instead of a dict. This only affects room version 10 when frozen events are enabled. --- changelog.d/14864.bugfix | 1 + synapse/event_auth.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14864.bugfix diff --git a/changelog.d/14864.bugfix b/changelog.d/14864.bugfix new file mode 100644 index 000000000000..12c0c74ab3cd --- /dev/null +++ b/changelog.d/14864.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. diff --git a/synapse/event_auth.py b/synapse/event_auth.py index d437b7e5d1d2..c4a7b16413c5 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import collections.abc import logging import typing from typing import ( @@ -877,7 +878,7 @@ def _check_power_levels( if not isinstance(v, int): raise SynapseError(400, f"{v!r} must be an integer.") if k in {"events", "notifications", "users"}: - if not isinstance(v, dict) or not all( + if not isinstance(v, collections.abc.Mapping) or not all( isinstance(v, int) for v in v.values() ): raise SynapseError( From e8f2bf5c40c27e68e5983ebbd1fc0281bc45bf5f Mon Sep 17 00:00:00 2001 From: Catalan Lover <48515417+FSG-Cat@users.noreply.github.com> Date: Wed, 18 Jan 2023 19:59:48 +0100 Subject: [PATCH 039/344] Change default room version to 10. Implements MSC3904 (#14111) * Change Documentation to have v10 as default room version * Change Default Room version to 10 * Add changelog entry for default room version swap * Add changelog entry for v10 default room version in docs * Clarify doc changelog entry Co-authored-by: David Robertson * Improve Documentation changes. 
Co-authored-by: David Robertson * Update Changelog entry to have correct format Co-authored-by: David Robertson * Update Spec Version to 1.5 * Only need 1 changelog. * Fix test. * Update "Changed in" line Co-authored-by: David Robertson Co-authored-by: Patrick Cloke Co-authored-by: Patrick Cloke --- changelog.d/14111.feature | 1 + docs/usage/configuration/config_documentation.md | 4 +++- synapse/config/server.py | 2 +- tests/rest/client/test_upgrade_room.py | 12 +++++++++--- 4 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14111.feature diff --git a/changelog.d/14111.feature b/changelog.d/14111.feature new file mode 100644 index 000000000000..0a794701a769 --- /dev/null +++ b/changelog.d/14111.feature @@ -0,0 +1 @@ +Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 3481e866f705..2883f76a26cd 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -295,7 +295,9 @@ Known room versions are listed [here](https://spec.matrix.org/latest/rooms/#comp For example, for room version 1, `default_room_version` should be set to "1". -Currently defaults to "9". +Currently defaults to ["10"](https://spec.matrix.org/v1.5/rooms/v10/). + +_Changed in Synapse 1.76:_ the default version room version was increased from [9](https://spec.matrix.org/v1.5/rooms/v9/) to [10](https://spec.matrix.org/v1.5/rooms/v10/). Example configuration: ```yaml diff --git a/synapse/config/server.py b/synapse/config/server.py index ec46ca63adf5..80bcfa40800c 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -151,7 +151,7 @@ def generate_ip_set( "fec0::/10", ] -DEFAULT_ROOM_VERSION = "9" +DEFAULT_ROOM_VERSION = "10" ROOM_COMPLEXITY_TOO_GREAT = ( "Your homeserver is unable to join rooms this large or complex. " diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index 5e7bf97482b5..5ec343dd7fd2 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -199,9 +199,15 @@ def test_power_levels_tombstone(self) -> None: def test_stringy_power_levels(self) -> None: """The room upgrade converts stringy power levels to proper integers.""" + # Create a room on room version < 10. + room_id = self.helper.create_room_as( + self.creator, tok=self.creator_token, room_version="9" + ) + self.helper.join(room_id, self.other, tok=self.other_token) + # Retrieve the room's current power levels. power_levels = self.helper.get_state( - self.room_id, + room_id, "m.room.power_levels", tok=self.creator_token, ) @@ -217,14 +223,14 @@ def test_stringy_power_levels(self) -> None: # conscience, we ought to ensure it's upgrading from a sufficiently old # version of room. self.helper.send_state( - self.room_id, + room_id, "m.room.power_levels", body=power_levels, tok=self.creator_token, ) # Upgrade the room. Check the homeserver reports success. - channel = self._upgrade_room() + channel = self._upgrade_room(room_id=room_id) self.assertEqual(200, channel.code, channel.result) # Extract the new room ID. 
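
The event-auth fix above (#14864) matters because, with frozen events enabled, power-level contents arrive as read-only mapping types rather than plain dicts, and the strict integer validation it touches only runs on room version 10, which #14111 has just made the default. A minimal sketch of the distinction, using `types.MappingProxyType` as a stand-in for a frozen mapping (Synapse itself uses a frozendict-style type):

```python
# Sketch only: shows why isinstance(v, dict) is the wrong check once event
# contents may be frozen (read-only) mappings. MappingProxyType stands in for
# the frozendict-style type Synapse uses when frozen events are enabled.
import collections.abc
from types import MappingProxyType

users = MappingProxyType({"@alice:example.org": 100})

print(isinstance(users, dict))                     # False -> the old check rejected this
print(isinstance(users, collections.abc.Mapping))  # True  -> the new check accepts it

# Mirrors the patched validation: accept any mapping whose values are ints.
if not isinstance(users, collections.abc.Mapping) or not all(
    isinstance(v, int) for v in users.values()
):
    raise ValueError("'users' must map user IDs to integer power levels")
```
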
From 9187fd940e2b2bbfd4df7204053cc26b2707aad4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 18 Jan 2023 19:35:29 +0000 Subject: [PATCH 040/344] Wait for streams to catch up when processing HTTP replication. (#14820) This should hopefully mitigate a class of races where data gets out of sync due a HTTP replication request racing with the replication streams. --- changelog.d/14820.bugfix | 1 + synapse/handlers/federation_event.py | 4 + synapse/replication/http/_base.py | 97 +++++++++++++++++++++--- synapse/replication/http/account_data.py | 29 +++---- synapse/replication/http/devices.py | 10 +-- synapse/replication/http/federation.py | 28 +++---- synapse/replication/http/login.py | 5 +- synapse/replication/http/membership.py | 22 +++--- synapse/replication/http/presence.py | 7 +- synapse/replication/http/push.py | 5 +- synapse/replication/http/register.py | 9 +-- synapse/replication/http/send_event.py | 5 +- synapse/replication/http/send_events.py | 4 +- synapse/replication/http/state.py | 2 +- synapse/replication/http/streams.py | 6 +- synapse/replication/tcp/client.py | 25 +++++- synapse/replication/tcp/resource.py | 43 +++++------ synapse/storage/util/id_generators.py | 34 +++++---- synapse/types/__init__.py | 6 ++ tests/replication/http/test__base.py | 9 ++- tests/storage/test_id_generators.py | 20 +++-- 21 files changed, 226 insertions(+), 145 deletions(-) create mode 100644 changelog.d/14820.bugfix diff --git a/changelog.d/14820.bugfix b/changelog.d/14820.bugfix new file mode 100644 index 000000000000..36e94f2b9b96 --- /dev/null +++ b/changelog.d/14820.bugfix @@ -0,0 +1 @@ +Fix rare races when using workers. diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 6df000faafed..904a721483c9 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -2259,6 +2259,10 @@ async def persist_events_and_notify( event_and_contexts, backfilled=backfilled ) + # After persistence we always need to notify replication there may + # be new data. + self._notifier.notify_replication() + if self._ephemeral_messages_enabled: for event in events: # If there's an expiry timestamp on the event, schedule its expiry. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 3f4d3fc51ae3..709327b97fb4 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -17,7 +17,7 @@ import re import urllib.parse from inspect import signature -from typing import TYPE_CHECKING, Any, Awaitable, Callable, Dict, List, Tuple +from typing import TYPE_CHECKING, Any, Awaitable, Callable, ClassVar, Dict, List, Tuple from prometheus_client import Counter, Gauge @@ -27,6 +27,7 @@ from synapse.api.errors import HttpResponseException, SynapseError from synapse.http import RequestTimedOutError from synapse.http.server import HttpServer +from synapse.http.servlet import parse_json_object_from_request from synapse.http.site import SynapseRequest from synapse.logging import opentracing from synapse.logging.opentracing import trace_with_opname @@ -53,6 +54,9 @@ ) +_STREAM_POSITION_KEY = "_INT_STREAM_POS" + + class ReplicationEndpoint(metaclass=abc.ABCMeta): """Helper base class for defining new replication HTTP endpoints. @@ -94,6 +98,9 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): a connection error is received. RETRY_ON_CONNECT_ERROR_ATTEMPTS (int): Number of attempts to retry when receiving connection errors, each will backoff exponentially longer. 
+ WAIT_FOR_STREAMS (bool): Whether to wait for replication streams to + catch up before processing the request and/or response. Defaults to + True. """ NAME: str = abc.abstractproperty() # type: ignore @@ -104,6 +111,8 @@ class ReplicationEndpoint(metaclass=abc.ABCMeta): RETRY_ON_CONNECT_ERROR = True RETRY_ON_CONNECT_ERROR_ATTEMPTS = 5 # =63s (2^6-1) + WAIT_FOR_STREAMS: ClassVar[bool] = True + def __init__(self, hs: "HomeServer"): if self.CACHE: self.response_cache: ResponseCache[str] = ResponseCache( @@ -126,6 +135,10 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.worker_replication_secret: self._replication_secret = hs.config.worker.worker_replication_secret + self._streams = hs.get_replication_command_handler().get_streams_to_replicate() + self._replication = hs.get_replication_data_handler() + self._instance_name = hs.get_instance_name() + def _check_auth(self, request: Request) -> None: # Get the authorization header. auth_headers = request.requestHeaders.getRawHeaders(b"Authorization") @@ -160,7 +173,7 @@ async def _serialize_payload(**kwargs) -> JsonDict: @abc.abstractmethod async def _handle_request( - self, request: Request, **kwargs: Any + self, request: Request, content: JsonDict, **kwargs: Any ) -> Tuple[int, JsonDict]: """Handle incoming request. @@ -201,6 +214,10 @@ def make_client(cls, hs: "HomeServer") -> Callable: @trace_with_opname("outgoing_replication_request") async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: + # We have to pull these out here to avoid circular dependencies... + streams = hs.get_replication_command_handler().get_streams_to_replicate() + replication = hs.get_replication_data_handler() + with outgoing_gauge.track_inprogress(): if instance_name == local_instance_name: raise Exception("Trying to send HTTP request to self") @@ -219,6 +236,24 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: data = await cls._serialize_payload(**kwargs) + if cls.METHOD != "GET" and cls.WAIT_FOR_STREAMS: + # Include the current stream positions that we write to. We + # don't do this for GETs as they don't have a body, and we + # generally assume that a GET won't rely on data we have + # written. + if _STREAM_POSITION_KEY in data: + raise Exception( + "data to send contains %r key", _STREAM_POSITION_KEY + ) + + data[_STREAM_POSITION_KEY] = { + "streams": { + stream.NAME: stream.current_token(local_instance_name) + for stream in streams + }, + "instance_name": local_instance_name, + } + url_args = [ urllib.parse.quote(kwargs[name], safe="") for name in cls.PATH_ARGS ] @@ -308,6 +343,18 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: ) from e _outgoing_request_counter.labels(cls.NAME, 200).inc() + + # Wait on any streams that the remote may have written to. + for stream_name, position in result.get( + _STREAM_POSITION_KEY, {} + ).items(): + await replication.wait_for_stream_position( + instance_name=instance_name, + stream_name=stream_name, + position=position, + raise_on_timeout=False, + ) + return result return send_request @@ -353,6 +400,23 @@ async def _check_auth_and_handle( if self._replication_secret: self._check_auth(request) + if self.METHOD == "GET": + # GET APIs always have an empty body. + content = {} + else: + content = parse_json_object_from_request(request) + + # Wait on any streams that the remote may have written to. 
+ for stream_name, position in content.get(_STREAM_POSITION_KEY, {"streams": {}})[ + "streams" + ].items(): + await self._replication.wait_for_stream_position( + instance_name=content[_STREAM_POSITION_KEY]["instance_name"], + stream_name=stream_name, + position=position, + raise_on_timeout=False, + ) + if self.CACHE: txn_id = kwargs.pop("txn_id") @@ -361,13 +425,28 @@ async def _check_auth_and_handle( # correctly yet. In particular, there may be issues to do with logging # context lifetimes. - return await self.response_cache.wrap( - txn_id, self._handle_request, request, **kwargs + code, response = await self.response_cache.wrap( + txn_id, self._handle_request, request, content, **kwargs ) + else: + # The `@cancellable` decorator may be applied to `_handle_request`. But we + # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`, + # so we have to set up the cancellable flag ourselves. + request.is_render_cancellable = is_function_cancellable( + self._handle_request + ) + + code, response = await self._handle_request(request, content, **kwargs) + + # Return streams we may have written to in the course of processing this + # request. + if _STREAM_POSITION_KEY in response: + raise Exception("data to send contains %r key", _STREAM_POSITION_KEY) - # The `@cancellable` decorator may be applied to `_handle_request`. But we - # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`, - # so we have to set up the cancellable flag ourselves. - request.is_render_cancellable = is_function_cancellable(self._handle_request) + if self.WAIT_FOR_STREAMS: + response[_STREAM_POSITION_KEY] = { + stream.NAME: stream.current_token(self._instance_name) + for stream in self._streams + } - return await self._handle_request(request, **kwargs) + return code, response diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py index 0edc95977b3a..2374f810c94f 100644 --- a/synapse/replication/http/account_data.py +++ b/synapse/replication/http/account_data.py @@ -18,7 +18,6 @@ from twisted.web.server import Request from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict @@ -61,10 +60,8 @@ async def _serialize_payload( # type: ignore[override] return payload async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str, account_data_type: str + self, request: Request, content: JsonDict, user_id: str, account_data_type: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - max_stream_id = await self.handler.add_account_data_for_user( user_id, account_data_type, content["content"] ) @@ -101,7 +98,7 @@ async def _serialize_payload( # type: ignore[override] return {} async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str, account_data_type: str + self, request: Request, content: JsonDict, user_id: str, account_data_type: str ) -> Tuple[int, JsonDict]: max_stream_id = await self.handler.remove_account_data_for_user( user_id, account_data_type @@ -143,10 +140,13 @@ async def _serialize_payload( # type: ignore[override] return payload async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str, room_id: str, account_data_type: str + self, + request: Request, + content: JsonDict, + user_id: str, + room_id: str, + account_data_type: str, ) -> Tuple[int, JsonDict]: - 
content = parse_json_object_from_request(request) - max_stream_id = await self.handler.add_account_data_to_room( user_id, room_id, account_data_type, content["content"] ) @@ -183,7 +183,12 @@ async def _serialize_payload( # type: ignore[override] return {} async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str, room_id: str, account_data_type: str + self, + request: Request, + content: JsonDict, + user_id: str, + room_id: str, + account_data_type: str, ) -> Tuple[int, JsonDict]: max_stream_id = await self.handler.remove_account_data_for_room( user_id, room_id, account_data_type @@ -225,10 +230,8 @@ async def _serialize_payload( # type: ignore[override] return payload async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str, room_id: str, tag: str + self, request: Request, content: JsonDict, user_id: str, room_id: str, tag: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - max_stream_id = await self.handler.add_tag_to_room( user_id, room_id, tag, content["content"] ) @@ -266,7 +269,7 @@ async def _serialize_payload(user_id: str, room_id: str, tag: str) -> JsonDict: return {} async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str, room_id: str, tag: str + self, request: Request, content: JsonDict, user_id: str, room_id: str, tag: str ) -> Tuple[int, JsonDict]: max_stream_id = await self.handler.remove_tag_from_room( user_id, diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index ea5c08e6cfdf..ecea6fc915c7 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -18,7 +18,6 @@ from twisted.web.server import Request from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.logging.opentracing import active_span from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict @@ -78,7 +77,7 @@ async def _serialize_payload(user_id: str) -> JsonDict: # type: ignore[override return {} async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str + self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, Optional[JsonDict]]: user_devices = await self.device_list_updater.user_device_resync(user_id) @@ -138,9 +137,8 @@ async def _serialize_payload(user_ids: List[str]) -> JsonDict: # type: ignore[o return {"user_ids": user_ids} async def _handle_request( # type: ignore[override] - self, request: Request + self, request: Request, content: JsonDict ) -> Tuple[int, Dict[str, Optional[JsonDict]]]: - content = parse_json_object_from_request(request) user_ids: List[str] = content["user_ids"] logger.info("Resync for %r", user_ids) @@ -205,10 +203,8 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, request: Request + self, request: Request, content: JsonDict ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - user_id = content["user_id"] device_id = content["device_id"] keys = content["keys"] diff --git a/synapse/replication/http/federation.py b/synapse/replication/http/federation.py index d3abafed2871..53ad32703029 100644 --- a/synapse/replication/http/federation.py +++ b/synapse/replication/http/federation.py @@ -21,7 +21,6 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.http.server 
import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict from synapse.util.metrics import Measure @@ -114,10 +113,8 @@ async def _serialize_payload( # type: ignore[override] return payload - async def _handle_request(self, request: Request) -> Tuple[int, JsonDict]: # type: ignore[override] + async def _handle_request(self, request: Request, content: JsonDict) -> Tuple[int, JsonDict]: # type: ignore[override] with Measure(self.clock, "repl_fed_send_events_parse"): - content = parse_json_object_from_request(request) - room_id = content["room_id"] backfilled = content["backfilled"] @@ -181,13 +178,10 @@ async def _serialize_payload( # type: ignore[override] return {"origin": origin, "content": content} async def _handle_request( # type: ignore[override] - self, request: Request, edu_type: str + self, request: Request, content: JsonDict, edu_type: str ) -> Tuple[int, JsonDict]: - with Measure(self.clock, "repl_fed_send_edu_parse"): - content = parse_json_object_from_request(request) - - origin = content["origin"] - edu_content = content["content"] + origin = content["origin"] + edu_content = content["content"] logger.info("Got %r edu from %s", edu_type, origin) @@ -231,13 +225,10 @@ async def _serialize_payload(query_type: str, args: JsonDict) -> JsonDict: # ty return {"args": args} async def _handle_request( # type: ignore[override] - self, request: Request, query_type: str + self, request: Request, content: JsonDict, query_type: str ) -> Tuple[int, JsonDict]: - with Measure(self.clock, "repl_fed_query_parse"): - content = parse_json_object_from_request(request) - - args = content["args"] - args["origin"] = content["origin"] + args = content["args"] + args["origin"] = content["origin"] logger.info("Got %r query from %s", query_type, args["origin"]) @@ -274,7 +265,7 @@ async def _serialize_payload(room_id: str) -> JsonDict: # type: ignore[override return {} async def _handle_request( # type: ignore[override] - self, request: Request, room_id: str + self, request: Request, content: JsonDict, room_id: str ) -> Tuple[int, JsonDict]: await self.store.clean_room_for_join(room_id) @@ -307,9 +298,8 @@ async def _serialize_payload(room_id: str, room_version: RoomVersion) -> JsonDic return {"room_version": room_version.identifier} async def _handle_request( # type: ignore[override] - self, request: Request, room_id: str + self, request: Request, content: JsonDict, room_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) room_version = KNOWN_ROOM_VERSIONS[content["room_version"]] await self.store.maybe_store_room_on_outlier_membership(room_id, room_version) return 200, {} diff --git a/synapse/replication/http/login.py b/synapse/replication/http/login.py index c68e18da129b..6ad6cb1bfe4e 100644 --- a/synapse/replication/http/login.py +++ b/synapse/replication/http/login.py @@ -18,7 +18,6 @@ from twisted.web.server import Request from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict @@ -73,10 +72,8 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str + self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - device_id = 
content["device_id"] initial_display_name = content["initial_display_name"] is_guest = content["is_guest"] diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 663bff573848..9fa1060d48f6 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -17,7 +17,6 @@ from twisted.web.server import Request from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.http.site import SynapseRequest from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict, Requester, UserID @@ -79,10 +78,8 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, request: SynapseRequest, room_id: str, user_id: str + self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - remote_room_hosts = content["remote_room_hosts"] event_content = content["content"] @@ -147,11 +144,10 @@ async def _serialize_payload( # type: ignore[override] async def _handle_request( # type: ignore[override] self, request: SynapseRequest, + content: JsonDict, room_id: str, user_id: str, ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - remote_room_hosts = content["remote_room_hosts"] event_content = content["content"] @@ -217,10 +213,8 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, request: SynapseRequest, invite_event_id: str + self, request: SynapseRequest, content: JsonDict, invite_event_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - txn_id = content["txn_id"] event_content = content["content"] @@ -285,10 +279,9 @@ async def _serialize_payload( # type: ignore[override] async def _handle_request( # type: ignore[override] self, request: SynapseRequest, + content: JsonDict, knock_event_id: str, ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - txn_id = content["txn_id"] event_content = content["content"] @@ -347,7 +340,12 @@ async def _serialize_payload( # type: ignore[override] return {} async def _handle_request( # type: ignore[override] - self, request: Request, room_id: str, user_id: str, change: str + self, + request: Request, + content: JsonDict, + room_id: str, + user_id: str, + change: str, ) -> Tuple[int, JsonDict]: logger.info("user membership change: %s in %s", user_id, room_id) diff --git a/synapse/replication/http/presence.py b/synapse/replication/http/presence.py index 4a5b08f56f73..db16aac9c206 100644 --- a/synapse/replication/http/presence.py +++ b/synapse/replication/http/presence.py @@ -18,7 +18,6 @@ from twisted.web.server import Request from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict, UserID @@ -56,7 +55,7 @@ async def _serialize_payload(user_id: str) -> JsonDict: # type: ignore[override return {} async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str + self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, JsonDict]: await self._presence_handler.bump_presence_active_time( UserID.from_string(user_id) @@ -107,10 +106,8 @@ async def _serialize_payload( # type: ignore[override] } async def 
_handle_request( # type: ignore[override] - self, request: Request, user_id: str + self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - await self._presence_handler.set_state( UserID.from_string(user_id), content["state"], diff --git a/synapse/replication/http/push.py b/synapse/replication/http/push.py index af5c2f66a735..297e8ad564bd 100644 --- a/synapse/replication/http/push.py +++ b/synapse/replication/http/push.py @@ -18,7 +18,6 @@ from twisted.web.server import Request from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict @@ -61,10 +60,8 @@ async def _serialize_payload(app_id: str, pushkey: str, user_id: str) -> JsonDic return payload async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str + self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - app_id = content["app_id"] pushkey = content["pushkey"] diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 976c2833603d..265e601b96a9 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -18,7 +18,6 @@ from twisted.web.server import Request from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict @@ -96,10 +95,8 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str + self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - await self.registration_handler.check_registration_ratelimit(content["address"]) # Always default admin users to approved (since it means they were created by @@ -150,10 +147,8 @@ async def _serialize_payload( # type: ignore[override] return {"auth_result": auth_result, "access_token": access_token} async def _handle_request( # type: ignore[override] - self, request: Request, user_id: str + self, request: Request, content: JsonDict, user_id: str ) -> Tuple[int, JsonDict]: - content = parse_json_object_from_request(request) - auth_result = content["auth_result"] access_token = content["access_token"] diff --git a/synapse/replication/http/send_event.py b/synapse/replication/http/send_event.py index 4215a1c1bc41..27ad91407502 100644 --- a/synapse/replication/http/send_event.py +++ b/synapse/replication/http/send_event.py @@ -21,7 +21,6 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict, Requester, UserID from synapse.util.metrics import Measure @@ -114,11 +113,9 @@ async def _serialize_payload( # type: ignore[override] return payload async def _handle_request( # type: ignore[override] - self, request: Request, event_id: str + self, request: Request, content: JsonDict, event_id: str ) -> Tuple[int, JsonDict]: with Measure(self.clock, "repl_send_event_parse"): - content = parse_json_object_from_request(request) - 
event_dict = content["event"] room_ver = KNOWN_ROOM_VERSIONS[content["room_version"]] internal_metadata = content["internal_metadata"] diff --git a/synapse/replication/http/send_events.py b/synapse/replication/http/send_events.py index 8889bbb644e1..4f82c9f96daa 100644 --- a/synapse/replication/http/send_events.py +++ b/synapse/replication/http/send_events.py @@ -21,7 +21,6 @@ from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.http.server import HttpServer -from synapse.http.servlet import parse_json_object_from_request from synapse.replication.http._base import ReplicationEndpoint from synapse.types import JsonDict, Requester, UserID from synapse.util.metrics import Measure @@ -114,10 +113,9 @@ async def _serialize_payload( # type: ignore[override] return payload async def _handle_request( # type: ignore[override] - self, request: Request + self, request: Request, payload: JsonDict ) -> Tuple[int, JsonDict]: with Measure(self.clock, "repl_send_events_parse"): - payload = parse_json_object_from_request(request) events_and_context = [] events = payload["events"] diff --git a/synapse/replication/http/state.py b/synapse/replication/http/state.py index 838b7584e56f..0c524e7de3fd 100644 --- a/synapse/replication/http/state.py +++ b/synapse/replication/http/state.py @@ -57,7 +57,7 @@ async def _serialize_payload(room_id: str) -> JsonDict: # type: ignore[override return {} async def _handle_request( # type: ignore[override] - self, request: Request, room_id: str + self, request: Request, content: JsonDict, room_id: str ) -> Tuple[int, JsonDict]: writer_instance = self._events_shard_config.get_instance(room_id) if writer_instance != self._instance_name: diff --git a/synapse/replication/http/streams.py b/synapse/replication/http/streams.py index c06522536254..3c7b5b18eab8 100644 --- a/synapse/replication/http/streams.py +++ b/synapse/replication/http/streams.py @@ -54,6 +54,10 @@ class ReplicationGetStreamUpdates(ReplicationEndpoint): PATH_ARGS = ("stream_name",) METHOD = "GET" + # We don't want to wait for replication streams to catch up, as this gets + # called in the process of catching replication streams up. 
+ WAIT_FOR_STREAMS = False + def __init__(self, hs: "HomeServer"): super().__init__(hs) @@ -67,7 +71,7 @@ async def _serialize_payload( # type: ignore[override] return {"from_token": from_token, "upto_token": upto_token} async def _handle_request( # type: ignore[override] - self, request: Request, stream_name: str + self, request: Request, content: JsonDict, stream_name: str ) -> Tuple[int, JsonDict]: stream = self.streams.get(stream_name) if stream is None: diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 322d695bc7f0..5c2482e40cb6 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -16,6 +16,7 @@ import logging from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple +from twisted.internet import defer from twisted.internet.defer import Deferred from twisted.internet.interfaces import IAddress, IConnector from twisted.internet.protocol import ReconnectingClientFactory @@ -314,10 +315,21 @@ def on_remote_server_up(self, server: str) -> None: self.send_handler.wake_destination(server) async def wait_for_stream_position( - self, instance_name: str, stream_name: str, position: int + self, + instance_name: str, + stream_name: str, + position: int, + raise_on_timeout: bool = True, ) -> None: """Wait until this instance has received updates up to and including the given stream position. + + Args: + instance_name + stream_name + position + raise_on_timeout: Whether to raise an exception if we time out + waiting for the updates, or if we log an error and return. """ if instance_name == self._instance_name: @@ -345,7 +357,16 @@ async def wait_for_stream_position( # We measure here to get in flight counts and average waiting time. with Measure(self._clock, "repl.wait_for_stream_position"): logger.info("Waiting for repl stream %r to reach %s", stream_name, position) - await make_deferred_yieldable(deferred) + try: + await make_deferred_yieldable(deferred) + except defer.TimeoutError: + logger.error("Timed out waiting for stream %s", stream_name) + + if raise_on_timeout: + raise + + return + logger.info( "Finished waiting for repl stream %r to reach %s", stream_name, position ) diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 99f09669f00b..9d17eff71451 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -199,33 +199,28 @@ async def _run_notifier_loop(self) -> None: # The token has advanced but there is no data to # send, so we send a `POSITION` to inform other # workers of the updated position. - if stream.NAME == EventsStream.NAME: - # XXX: We only do this for the EventStream as it - # turns out that e.g. account data streams share - # their "current token" with each other, meaning - # that it is *not* safe to send a POSITION. - - # Note: `last_token` may not *actually* be the - # last token we sent out in a RDATA or POSITION. - # This can happen if we sent out an RDATA for - # position X when our current token was say X+1. - # Other workers will see RDATA for X and then a - # POSITION with last token of X+1, which will - # cause them to check if there were any missing - # updates between X and X+1. - logger.info( - "Sending position: %s -> %s", + + # Note: `last_token` may not *actually* be the + # last token we sent out in a RDATA or POSITION. + # This can happen if we sent out an RDATA for + # position X when our current token was say X+1. 
+ # Other workers will see RDATA for X and then a + # POSITION with last token of X+1, which will + # cause them to check if there were any missing + # updates between X and X+1. + logger.info( + "Sending position: %s -> %s", + stream.NAME, + current_token, + ) + self.command_handler.send_command( + PositionCommand( stream.NAME, + self._instance_name, + last_token, current_token, ) - self.command_handler.send_command( - PositionCommand( - stream.NAME, - self._instance_name, - last_token, - current_token, - ) - ) + ) continue # Some streams return multiple rows with the same stream IDs, diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 0d7108f01b41..8670ffbfa374 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -378,6 +378,12 @@ def __init__( self._current_positions.values(), default=1 ) + if not writers: + # If there have been no explicit writers given then any instance can + # write to the stream. In which case, let's pre-seed our own + # position with the current minimum. + self._current_positions[self._instance_name] = self._persisted_upto_position + def _load_current_ids( self, db_conn: LoggingDatabaseConnection, @@ -695,24 +701,22 @@ def _add_persisted_position(self, new_id: int) -> None: heapq.heappush(self._known_persisted_positions, new_id) - # If we're a writer and we don't have any active writes we update our - # current position to the latest position seen. This allows the instance - # to report a recent position when asked, rather than a potentially old - # one (if this instance hasn't written anything for a while). - our_current_position = self._current_positions.get(self._instance_name) - if ( - our_current_position - and not self._unfinished_ids - and not self._in_flight_fetches - ): - self._current_positions[self._instance_name] = max( - our_current_position, new_id - ) - # We move the current min position up if the minimum current positions # of all instances is higher (since by definition all positions less # that that have been persisted). - min_curr = min(self._current_positions.values(), default=0) + our_current_position = self._current_positions.get(self._instance_name, 0) + min_curr = min( + ( + token + for name, token in self._current_positions.items() + if name != self._instance_name + ), + default=our_current_position, + ) + + if our_current_position and (self._unfinished_ids or self._in_flight_fetches): + min_curr = min(min_curr, our_current_position) + self._persisted_upto_position = max(min_curr, self._persisted_upto_position) # We now iterate through the seen positions, discarding those that are diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 0c725eb9677d..c59eca24301e 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -604,6 +604,12 @@ async def to_string(self, store: "DataStore") -> str: elif self.instance_map: entries = [] for name, pos in self.instance_map.items(): + if pos <= self.stream: + # Ignore instances who are below the minimum stream position + # (we might know they've advanced without seeing a recent + # write from them). 
+ continue + instance_id = await store.get_id_for_instance(name) entries.append(f"{instance_id}.{pos}") diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index 936ab4504a79..e03d9b4cc098 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -44,7 +44,7 @@ async def _serialize_payload() -> JsonDict: @cancellable async def _handle_request( # type: ignore[override] - self, request: Request + self, request: Request, content: JsonDict ) -> Tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} @@ -54,6 +54,7 @@ class UncancellableReplicationEndpoint(ReplicationEndpoint): NAME = "uncancellable_sleep" PATH_ARGS = () CACHE = False + WAIT_FOR_STREAMS = False def __init__(self, hs: HomeServer): super().__init__(hs) @@ -64,7 +65,7 @@ async def _serialize_payload() -> JsonDict: return {} async def _handle_request( # type: ignore[override] - self, request: Request + self, request: Request, content: JsonDict ) -> Tuple[int, JsonDict]: await self.clock.sleep(1.0) return HTTPStatus.OK, {"result": True} @@ -85,7 +86,7 @@ def create_test_resource(self): def test_cancellable_disconnect(self) -> None: """Test that handlers with the `@cancellable` flag can be cancelled.""" path = f"{REPLICATION_PREFIX}/{CancellableReplicationEndpoint.NAME}/" - channel = self.make_request("POST", path, await_result=False) + channel = self.make_request("POST", path, await_result=False, content={}) test_disconnect( self.reactor, channel, @@ -96,7 +97,7 @@ def test_cancellable_disconnect(self) -> None: def test_uncancellable_disconnect(self) -> None: """Test that handlers without the `@cancellable` flag cannot be cancelled.""" path = f"{REPLICATION_PREFIX}/{UncancellableReplicationEndpoint.NAME}/" - channel = self.make_request("POST", path, await_result=False) + channel = self.make_request("POST", path, await_result=False, content={}) test_disconnect( self.reactor, channel, diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index d6a2b8d2743e..ff9691c518bc 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -349,8 +349,8 @@ def test_multi_instance(self) -> None: # The first ID gen will notice that it can advance its token to 7 as it # has no in progress writes... - self.assertEqual(first_id_gen.get_positions(), {"first": 7, "second": 7}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 7}) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 7) # ... but the second ID gen doesn't know that. 
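
The adjusted assertions here and in the following hunks reflect a deliberate behaviour change: an idle writer no longer advertises positions it did not write itself, even though the stream's persisted-up-to position still advances. A toy model of those two notions (not Synapse's `MultiWriterIdGenerator`), assuming IDs 1-3 were written by `first` and 4-7 by `second` to mirror the numbers in the assertions:

```python
# Toy model only: separates "a writer's own position" from "persisted up to",
# the two quantities the updated assertions distinguish. Assumes IDs 1-3 were
# written by "first" and 4-7 by "second".
from typing import Set

def persisted_upto(persisted_ids: Set[int]) -> int:
    """Largest N such that every stream ID 1..N has been persisted by someone."""
    n = 0
    while n + 1 in persisted_ids:
        n += 1
    return n

own_last_write = {"first": 3, "second": 7}
all_persisted: Set[int] = set(range(1, 8))

# Each writer now reports only what it has itself written...
assert own_last_write["first"] == 3
# ...while the stream as a whole is still known to be persisted up to 7.
assert persisted_upto(all_persisted) == 7
```
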
@@ -366,8 +366,9 @@ async def _get_next_async() -> None: self.assertEqual(stream_id, 8) self.assertEqual( - first_id_gen.get_positions(), {"first": 7, "second": 7} + first_id_gen.get_positions(), {"first": 3, "second": 7} ) + self.assertEqual(first_id_gen.get_persisted_upto_position(), 7) self.get_success(_get_next_async()) @@ -473,7 +474,7 @@ def test_get_persisted_upto_position_get_next(self) -> None: id_gen = self._create_id_generator("first", writers=["first", "second"]) - self.assertEqual(id_gen.get_positions(), {"first": 5, "second": 5}) + self.assertEqual(id_gen.get_positions(), {"first": 3, "second": 5}) self.assertEqual(id_gen.get_persisted_upto_position(), 5) @@ -720,7 +721,7 @@ async def _get_next_async2() -> None: self.get_success(_get_next_async2()) - self.assertEqual(id_gen_1.get_positions(), {"first": -2, "second": -2}) + self.assertEqual(id_gen_1.get_positions(), {"first": -1, "second": -2}) self.assertEqual(id_gen_2.get_positions(), {"first": -1, "second": -2}) self.assertEqual(id_gen_1.get_persisted_upto_position(), -2) self.assertEqual(id_gen_2.get_persisted_upto_position(), -2) @@ -816,15 +817,12 @@ def test_load_existing_stream(self) -> None: first_id_gen = self._create_id_generator("first", writers=["first", "second"]) second_id_gen = self._create_id_generator("second", writers=["first", "second"]) - # The first ID gen will notice that it can advance its token to 7 as it - # has no in progress writes... - self.assertEqual(first_id_gen.get_positions(), {"first": 7, "second": 6}) - self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 7) + self.assertEqual(first_id_gen.get_positions(), {"first": 3, "second": 6}) + self.assertEqual(first_id_gen.get_current_token_for_writer("first"), 3) self.assertEqual(first_id_gen.get_current_token_for_writer("second"), 6) self.assertEqual(first_id_gen.get_persisted_upto_position(), 7) - # ... but the second ID gen doesn't know that. self.assertEqual(second_id_gen.get_positions(), {"first": 3, "second": 7}) self.assertEqual(second_id_gen.get_current_token_for_writer("first"), 3) self.assertEqual(second_id_gen.get_current_token_for_writer("second"), 7) - self.assertEqual(first_id_gen.get_persisted_upto_position(), 7) + self.assertEqual(second_id_gen.get_persisted_upto_position(), 7) From 2069231645169a7b3526fc742e76948c4363660a Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 19 Jan 2023 11:58:17 +0000 Subject: [PATCH 041/344] Update logging_sample_config.md (#14868) You do not have to restart synapse to reload the log config. --- changelog.d/14868.doc | 1 + docs/usage/configuration/logging_sample_config.md | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14868.doc diff --git a/changelog.d/14868.doc b/changelog.d/14868.doc new file mode 100644 index 000000000000..b5ddff78a1ca --- /dev/null +++ b/changelog.d/14868.doc @@ -0,0 +1 @@ +Minor corrections to the logging configuration documentation. diff --git a/docs/usage/configuration/logging_sample_config.md b/docs/usage/configuration/logging_sample_config.md index 499ab7cfe500..895674199738 100644 --- a/docs/usage/configuration/logging_sample_config.md +++ b/docs/usage/configuration/logging_sample_config.md @@ -1,9 +1,11 @@ # Logging Sample Configuration File Below is a sample logging configuration file. This file can be tweaked to control how your -homeserver will output logs. A restart of the server is generally required to apply any -changes made to this file. 
The value of the `log_config` option in your homeserver -config should be the path to this file. +homeserver will output logs. The value of the `log_config` option in your homeserver config +should be the path to this file. + +To apply changes made to this file, send Synapse a SIGHUP signal (or, if using `systemd`, run +`systemctl reload` on the Synapse service). Note that a default logging configuration (shown below) is created automatically alongside the homeserver config when following the [installation instructions](../../setup/installation.md). From a7b54ca8d84e9371244d792c30fc9084579470e1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 19 Jan 2023 12:47:10 +0000 Subject: [PATCH 042/344] Implement MSC3930: polls push rules (#14787) --- changelog.d/14787.feature | 1 + .../conf/workers-shared-extra.yaml.j2 | 6 +- rust/benches/evaluator.rs | 9 ++- rust/src/push/base_rules.rs | 78 ++++++++++++++++++- rust/src/push/evaluator.rs | 2 +- rust/src/push/mod.rs | 16 +++- scripts-dev/complement.sh | 2 +- stubs/synapse/synapse_rust/push.pyi | 3 +- synapse/config/experimental.py | 7 ++ synapse/storage/databases/main/push_rule.py | 3 +- 10 files changed, 114 insertions(+), 13 deletions(-) create mode 100644 changelog.d/14787.feature diff --git a/changelog.d/14787.feature b/changelog.d/14787.feature new file mode 100644 index 000000000000..6a340350470c --- /dev/null +++ b/changelog.d/14787.feature @@ -0,0 +1 @@ +Implement experimental support for MSC3930: Push rules for (MSC3381) Polls. \ No newline at end of file diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index 7e9ec23808c6..281157846ace 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -98,12 +98,14 @@ experimental_features: # client-side support for partial state in /send_join responses faster_joins: true {% endif %} - # Filtering /messages by relation type. - msc3874_enabled: true + # Enable support for polls + msc3381_polls_enabled: true # Enable deleting device-specific notification settings stored in account data msc3890_enabled: true # Enable removing account data support msc3391_enabled: true + # Filtering /messages by relation type. + msc3874_enabled: true server_notices: system_mxid_localpart: _server diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 442a79348fcf..8c28bb0af3dc 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -150,8 +150,13 @@ fn bench_eval_message(b: &mut Bencher) { ) .unwrap(); - let rules = - FilteredPushRules::py_new(PushRules::new(Vec::new()), Default::default(), false, false); + let rules = FilteredPushRules::py_new( + PushRules::new(Vec::new()), + Default::default(), + false, + false, + false, + ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); } diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 35129691ca43..9140a69bb654 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -1,4 +1,4 @@ -// Copyright 2022 The Matrix.org Foundation C.I.C. +// Copyright 2022, 2023 The Matrix.org Foundation C.I.C. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
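
Before the rule definitions in the hunks below, it may help to see roughly how the plain poll-start underride rule would look once serialised into the client-facing push-rule format. This is an illustration only, with an invented variable name rather than authoritative output of the code; the rule is only served to clients when the experimental `msc3381_polls_enabled` flag is on.

    # Approximate client-facing shape of the new poll-start underride rule.
    poll_start_rule = {
        "rule_id": ".org.matrix.msc3930.rule.poll_start",
        "default": True,
        "enabled": True,
        "conditions": [
            {
                "kind": "event_match",
                "key": "type",
                "pattern": "org.matrix.msc3381.poll.start",
            }
        ],
        "actions": ["notify"],
    }
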
@@ -208,6 +208,20 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default: true, default_enabled: true, }, + PushRule { + rule_id: Cow::Borrowed("global/override/.org.matrix.msc3930.rule.poll_response"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.response")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[]), + default: true, + default_enabled: true, + }, ]; pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule { @@ -596,6 +610,68 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ default: true, default_enabled: true, }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_start_one_to_one"), + priority_class: 1, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::RoomMemberCount { + is: Some(Cow::Borrowed("2")), + }), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")), + pattern_type: None, + })), + ]), + actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_start"), + priority_class: 1, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::Notify]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_end_one_to_one"), + priority_class: 1, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::RoomMemberCount { + is: Some(Cow::Borrowed("2")), + }), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")), + pattern_type: None, + })), + ]), + actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3930.rule.poll_end"), + priority_class: 1, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::Notify]), + default: true, + default_enabled: true, + }, ]; lazy_static! 
{ diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index c901c0fbcc60..0242ee1c5ffc 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -483,7 +483,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, true), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 2e9d3e38a17b..842b13c88b2c 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -411,8 +411,9 @@ impl PushRules { pub struct FilteredPushRules { push_rules: PushRules, enabled_map: BTreeMap, - msc3664_enabled: bool, msc1767_enabled: bool, + msc3381_polls_enabled: bool, + msc3664_enabled: bool, } #[pymethods] @@ -421,14 +422,16 @@ impl FilteredPushRules { pub fn py_new( push_rules: PushRules, enabled_map: BTreeMap, - msc3664_enabled: bool, msc1767_enabled: bool, + msc3381_polls_enabled: bool, + msc3664_enabled: bool, ) -> Self { Self { push_rules, enabled_map, - msc3664_enabled, msc1767_enabled, + msc3381_polls_enabled, + msc3664_enabled, } } @@ -447,13 +450,18 @@ impl FilteredPushRules { .iter() .filter(|rule| { // Ignore disabled experimental push rules + + if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") { + return false; + } + if !self.msc3664_enabled && rule.rule_id == "global/override/.im.nheko.msc3664.reply" { return false; } - if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") { + if !self.msc3381_polls_enabled && rule.rule_id.contains("org.matrix.msc3930") { return false; } diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 7c48d8bccb0f..a183653d5210 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -190,7 +190,7 @@ fi extra_test_args=() -test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391" +test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930" # All environment variables starting with PASS_ will be shared. # (The prefix is stripped off before reaching the container.) diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 373b40740b37..304ed7111c20 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -43,8 +43,9 @@ class FilteredPushRules: self, push_rules: PushRules, enabled_map: Dict[str, bool], - msc3664_enabled: bool, msc1767_enabled: bool, + msc3381_polls_enabled: bool, + msc3664_enabled: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 0444ef82441e..89586db76313 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -146,6 +146,13 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "required to communicate account data deletions to clients." ) + # MSC3381: Polls. + # In practice, supporting polls in Synapse only requires an implementation of + # MSC3930: Push rules for MSC3391 polls; which is what this option enables. + self.msc3381_polls_enabled: bool = experimental.get( + "msc3381_polls_enabled", False + ) + # MSC3912: Relation-based redactions. 
self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index d4e4b777da95..03182887d138 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -86,8 +86,9 @@ def _load_rules( filtered_rules = FilteredPushRules( push_rules, enabled_map, - msc3664_enabled=experimental_config.msc3664_enabled, msc1767_enabled=experimental_config.msc1767_enabled, + msc3664_enabled=experimental_config.msc3664_enabled, + msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, ) return filtered_rules From cdf2707678dc9f08e965eb0f0c1f39e71552fe3e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 19 Jan 2023 22:19:56 +0000 Subject: [PATCH 043/344] Fix bug in wait for stream position (#14872) This caused some requests to fail. This caused some requests to fail. This really only started causing issues due to #14856 --- changelog.d/14872.misc | 1 + synapse/replication/tcp/client.py | 29 +++++++++++++++++++---------- 2 files changed, 20 insertions(+), 10 deletions(-) create mode 100644 changelog.d/14872.misc diff --git a/changelog.d/14872.misc b/changelog.d/14872.misc new file mode 100644 index 000000000000..3731d6cbf184 --- /dev/null +++ b/changelog.d/14872.misc @@ -0,0 +1 @@ +Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 5c2482e40cb6..6e242c57493e 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -133,9 +133,9 @@ def __init__(self, hs: "HomeServer"): if hs.should_send_federation(): self.send_handler = FederationSenderHandler(hs) - # Map from stream to list of deferreds waiting for the stream to + # Map from stream and instance to list of deferreds waiting for the stream to # arrive at a particular position. The lists are sorted by stream position. - self._streams_to_waiters: Dict[str, List[Tuple[int, Deferred]]] = {} + self._streams_to_waiters: Dict[Tuple[str, str], List[Tuple[int, Deferred]]] = {} async def on_rdata( self, stream_name: str, instance_name: str, token: int, rows: list @@ -270,7 +270,7 @@ async def on_rdata( # Notify any waiting deferreds. The list is ordered by position so we # just iterate through the list until we reach a position that is # greater than the received row position. - waiting_list = self._streams_to_waiters.get(stream_name, []) + waiting_list = self._streams_to_waiters.get((stream_name, instance_name), []) # Index of first item with a position after the current token, i.e we # have called all deferreds before this index. If not overwritten by @@ -279,14 +279,13 @@ async def on_rdata( # `len(list)` works for both cases. index_of_first_deferred_not_called = len(waiting_list) + # We don't fire the deferreds until after we finish iterating over the + # list, to avoid the list changing when we fire the deferreds. + deferreds_to_callback = [] + for idx, (position, deferred) in enumerate(waiting_list): if position <= token: - try: - with PreserveLoggingContext(): - deferred.callback(None) - except Exception: - # The deferred has been cancelled or timed out. - pass + deferreds_to_callback.append(deferred) else: # The list is sorted by position so we don't need to continue # checking any further entries in the list. @@ -297,6 +296,14 @@ async def on_rdata( # loop. 
(This maintains the order so no need to resort) waiting_list[:] = waiting_list[index_of_first_deferred_not_called:] + for deferred in deferreds_to_callback: + try: + with PreserveLoggingContext(): + deferred.callback(None) + except Exception: + # The deferred has been cancelled or timed out. + pass + async def on_position( self, stream_name: str, instance_name: str, token: int ) -> None: @@ -349,7 +356,9 @@ async def wait_for_stream_position( deferred, _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS, self._reactor ) - waiting_list = self._streams_to_waiters.setdefault(stream_name, []) + waiting_list = self._streams_to_waiters.setdefault( + (stream_name, instance_name), [] + ) waiting_list.append((position, deferred)) waiting_list.sort(key=lambda t: t[0]) From cdea7c11d082e73606bea5d0462f7971e90d836c Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 20 Jan 2023 12:06:19 +0000 Subject: [PATCH 044/344] Faster joins: Avoid starting duplicate partial state syncs (#14844) Currently, we will try to start a new partial state sync every time we perform a remote join, which is undesirable if there is already one running for a given room. We intend to perform remote joins whenever additional local users wish to join a partial state room, so let's ensure that we do not start more than one concurrent partial state sync for any given room. ------------------------------------------------------------------------ There is a race condition where the homeserver leaves a room and later rejoins while the partial state sync from the previous membership is still running. There is no guarantee that the previous partial state sync will process the latest join, so we restart it if needed. Signed-off-by: Sean Quah --- changelog.d/14844.misc | 1 + synapse/handlers/federation.py | 106 +++++++++++++++++++++++++--- tests/handlers/test_federation.py | 112 +++++++++++++++++++++++++++++- 3 files changed, 210 insertions(+), 9 deletions(-) create mode 100644 changelog.d/14844.misc diff --git a/changelog.d/14844.misc b/changelog.d/14844.misc new file mode 100644 index 000000000000..30ce8663045f --- /dev/null +++ b/changelog.d/14844.misc @@ -0,0 +1 @@ +Add check to avoid starting duplicate partial state syncs. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index eca75f1108d1..e386f77de6d9 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -27,6 +27,7 @@ Iterable, List, Optional, + Set, Tuple, Union, ) @@ -171,12 +172,23 @@ def __init__(self, hs: "HomeServer"): self.third_party_event_rules = hs.get_third_party_event_rules() + # Tracks running partial state syncs by room ID. + # Partial state syncs currently only run on the main process, so it's okay to + # track them in-memory for now. + self._active_partial_state_syncs: Set[str] = set() + # Tracks partial state syncs we may want to restart. + # A dictionary mapping room IDs to (initial destination, other destinations) + # tuples. + self._partial_state_syncs_maybe_needing_restart: Dict[ + str, Tuple[Optional[str], Collection[str]] + ] = {} + # if this is the main process, fire off a background process to resume # any partial-state-resync operations which were in flight when we # were shut down. 
if not hs.config.worker.worker_app: run_as_background_process( - "resume_sync_partial_state_room", self._resume_sync_partial_state_room + "resume_sync_partial_state_room", self._resume_partial_state_room_sync ) @trace @@ -679,9 +691,7 @@ async def do_invite_join( if ret.partial_state: # Kick off the process of asynchronously fetching the state for this # room. - run_as_background_process( - desc="sync_partial_state_room", - func=self._sync_partial_state_room, + self._start_partial_state_room_sync( initial_destination=origin, other_destinations=ret.servers_in_room, room_id=room_id, @@ -1660,20 +1670,100 @@ async def get_room_complexity( # well. return None - async def _resume_sync_partial_state_room(self) -> None: + async def _resume_partial_state_room_sync(self) -> None: """Resumes resyncing of all partial-state rooms after a restart.""" assert not self.config.worker.worker_app partial_state_rooms = await self.store.get_partial_state_room_resync_info() for room_id, resync_info in partial_state_rooms.items(): - run_as_background_process( - desc="sync_partial_state_room", - func=self._sync_partial_state_room, + self._start_partial_state_room_sync( initial_destination=resync_info.joined_via, other_destinations=resync_info.servers_in_room, room_id=room_id, ) + def _start_partial_state_room_sync( + self, + initial_destination: Optional[str], + other_destinations: Collection[str], + room_id: str, + ) -> None: + """Starts the background process to resync the state of a partial state room, + if it is not already running. + + Args: + initial_destination: the initial homeserver to pull the state from + other_destinations: other homeservers to try to pull the state from, if + `initial_destination` is unavailable + room_id: room to be resynced + """ + + async def _sync_partial_state_room_wrapper() -> None: + if room_id in self._active_partial_state_syncs: + # Another local user has joined the room while there is already a + # partial state sync running. This implies that there is a new join + # event to un-partial state. We might find ourselves in one of a few + # scenarios: + # 1. There is an existing partial state sync. The partial state sync + # un-partial states the new join event before completing and all is + # well. + # 2. Before the latest join, the homeserver was no longer in the room + # and there is an existing partial state sync from our previous + # membership of the room. The partial state sync may have: + # a) succeeded, but not yet terminated. The room will not be + # un-partial stated again unless we restart the partial state + # sync. + # b) failed, because we were no longer in the room and remote + # homeservers were refusing our requests, but not yet + # terminated. After the latest join, remote homeservers may + # start answering our requests again, so we should restart the + # partial state sync. + # In the cases where we would want to restart the partial state sync, + # the room would have the partial state flag when the partial state sync + # terminates. + self._partial_state_syncs_maybe_needing_restart[room_id] = ( + initial_destination, + other_destinations, + ) + return + + self._active_partial_state_syncs.add(room_id) + + try: + await self._sync_partial_state_room( + initial_destination=initial_destination, + other_destinations=other_destinations, + room_id=room_id, + ) + finally: + # Read the room's partial state flag while we still hold the claim to + # being the active partial state sync (so that another partial state + # sync can't come along and mess with it under us). 
+ # Normally, the partial state flag will be gone. If it isn't, then we + # may find ourselves in scenario 2a or 2b as described in the comment + # above, where we want to restart the partial state sync. + is_still_partial_state_room = await self.store.is_partial_state_room( + room_id + ) + self._active_partial_state_syncs.remove(room_id) + + if room_id in self._partial_state_syncs_maybe_needing_restart: + ( + restart_initial_destination, + restart_other_destinations, + ) = self._partial_state_syncs_maybe_needing_restart.pop(room_id) + + if is_still_partial_state_room: + self._start_partial_state_room_sync( + initial_destination=restart_initial_destination, + other_destinations=restart_other_destinations, + room_id=room_id, + ) + + run_as_background_process( + desc="sync_partial_state_room", func=_sync_partial_state_room_wrapper + ) + async def _sync_partial_state_room( self, initial_destination: Optional[str], diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index cedbb9fafcfa..c1558c40c370 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,10 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import cast +from typing import Collection, Optional, cast from unittest import TestCase from unittest.mock import Mock, patch +from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes @@ -679,3 +680,112 @@ def test_failed_partial_join_is_clean(self) -> None: f"Stale partial-stated room flag left over for {room_id} after a" f" failed do_invite_join!", ) + + def test_duplicate_partial_state_room_syncs(self) -> None: + """ + Tests that concurrent partial state syncs are not started for the same room. + """ + is_partial_state = True + end_sync: "Deferred[None]" = Deferred() + + async def is_partial_state_room(room_id: str) -> bool: + return is_partial_state + + async def sync_partial_state_room( + initial_destination: Optional[str], + other_destinations: Collection[str], + room_id: str, + ) -> None: + nonlocal end_sync + try: + await end_sync + finally: + end_sync = Deferred() + + mock_is_partial_state_room = Mock(side_effect=is_partial_state_room) + mock_sync_partial_state_room = Mock(side_effect=sync_partial_state_room) + + fed_handler = self.hs.get_federation_handler() + store = self.hs.get_datastores().main + + with patch.object( + fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room + ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): + # Start the partial state sync. + fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + self.assertEqual(mock_sync_partial_state_room.call_count, 1) + + # Try to start another partial state sync. + # Nothing should happen. + fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + self.assertEqual(mock_sync_partial_state_room.call_count, 1) + + # End the partial state sync + is_partial_state = False + end_sync.callback(None) + + # The partial state sync should not be restarted. + self.assertEqual(mock_sync_partial_state_room.call_count, 1) + + # The next attempt to start the partial state sync should work. 
+ is_partial_state = True + fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + self.assertEqual(mock_sync_partial_state_room.call_count, 2) + + def test_partial_state_room_sync_restart(self) -> None: + """ + Tests that partial state syncs are restarted when a second partial state sync + was deduplicated and the first partial state sync fails. + """ + is_partial_state = True + end_sync: "Deferred[None]" = Deferred() + + async def is_partial_state_room(room_id: str) -> bool: + return is_partial_state + + async def sync_partial_state_room( + initial_destination: Optional[str], + other_destinations: Collection[str], + room_id: str, + ) -> None: + nonlocal end_sync + try: + await end_sync + finally: + end_sync = Deferred() + + mock_is_partial_state_room = Mock(side_effect=is_partial_state_room) + mock_sync_partial_state_room = Mock(side_effect=sync_partial_state_room) + + fed_handler = self.hs.get_federation_handler() + store = self.hs.get_datastores().main + + with patch.object( + fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room + ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): + # Start the partial state sync. + fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + self.assertEqual(mock_sync_partial_state_room.call_count, 1) + + # Fail the partial state sync. + # The partial state sync should not be restarted. + end_sync.errback(Exception("Failed to request /state_ids")) + self.assertEqual(mock_sync_partial_state_room.call_count, 1) + + # Start the partial state sync again. + fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + self.assertEqual(mock_sync_partial_state_room.call_count, 2) + + # Deduplicate another partial state sync. + fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + self.assertEqual(mock_sync_partial_state_room.call_count, 2) + + # Fail the partial state sync. + # It should restart with the latest parameters. + end_sync.errback(Exception("Failed to request /state_ids")) + self.assertEqual(mock_sync_partial_state_room.call_count, 3) + mock_sync_partial_state_room.assert_called_with( + initial_destination="hs3", + other_destinations=["hs2"], + room_id="room_id", + ) From cf18fea9e1a542b95749d28225172e8e2488fb37 Mon Sep 17 00:00:00 2001 From: katlol <1695469+katlol@users.noreply.github.com> Date: Fri, 20 Jan 2023 13:07:13 +0100 Subject: [PATCH 045/344] Dockerfile: Bump Python version from 3.9 to 3.11 (#14875) Closes https://github.com/matrix-org/synapse/issues/13234 Signed-off-by: Katia Esposito <1695469+katlol@users.noreply.github.com> Signed-off-by: Katia Esposito <1695469+katlol@users.noreply.github.com> --- changelog.d/14875.docker | 1 + docker/Dockerfile | 84 ++++++++++++++++++++-------------------- 2 files changed, 43 insertions(+), 42 deletions(-) create mode 100644 changelog.d/14875.docker diff --git a/changelog.d/14875.docker b/changelog.d/14875.docker new file mode 100644 index 000000000000..584fc104708d --- /dev/null +++ b/changelog.d/14875.docker @@ -0,0 +1 @@ +Bump default Python version in the Dockerfile from 3.9 to 3.11. diff --git a/docker/Dockerfile b/docker/Dockerfile index b2ec00591721..a85fd3d691c8 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -20,7 +20,7 @@ # `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in # in `poetry export` in the past. 
-ARG PYTHON_VERSION=3.9 +ARG PYTHON_VERSION=3.11 ### ### Stage 0: generate requirements.txt @@ -34,11 +34,11 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements # Here we use it to set up a cache for apt (and below for pip), to improve # rebuild speeds on slow connections. RUN \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && apt-get install -yqq \ - build-essential git libffi-dev libssl-dev \ - && rm -rf /var/lib/apt/lists/* + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update -qq && apt-get install -yqq \ + build-essential git libffi-dev libssl-dev \ + && rm -rf /var/lib/apt/lists/* # We install poetry in its own build stage to avoid its dependencies conflicting with # synapse's dependencies. @@ -64,9 +64,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE # Otherwise, just create an empty requirements file so that the Dockerfile can # proceed. RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ - /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \ + /root/.local/bin/poetry export --extras all -o /synapse/requirements.txt ${TEST_ONLY_SKIP_DEP_HASH_VERIFICATION:+--without-hashes}; \ else \ - touch /synapse/requirements.txt; \ + touch /synapse/requirements.txt; \ fi ### @@ -76,24 +76,24 @@ FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder # install the OS build deps RUN \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - apt-get update -qq && apt-get install -yqq \ - build-essential \ - libffi-dev \ - libjpeg-dev \ - libpq-dev \ - libssl-dev \ - libwebp-dev \ - libxml++2.6-dev \ - libxslt1-dev \ - openssl \ - zlib1g-dev \ - git \ - curl \ - libicu-dev \ - pkg-config \ - && rm -rf /var/lib/apt/lists/* + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + apt-get update -qq && apt-get install -yqq \ + build-essential \ + libffi-dev \ + libjpeg-dev \ + libpq-dev \ + libssl-dev \ + libwebp-dev \ + libxml++2.6-dev \ + libxslt1-dev \ + openssl \ + zlib1g-dev \ + git \ + curl \ + libicu-dev \ + pkg-config \ + && rm -rf /var/lib/apt/lists/* # Install rust and ensure its in the PATH @@ -134,9 +134,9 @@ ARG TEST_ONLY_IGNORE_POETRY_LOCKFILE RUN --mount=type=cache,target=/synapse/target,sharing=locked \ --mount=type=cache,target=${CARGO_HOME}/registry,sharing=locked \ if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ - pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \ + pip install --prefix="/install" --no-deps --no-warn-script-location /synapse[all]; \ else \ - pip install --prefix="/install" --no-warn-script-location /synapse[all]; \ + pip install --prefix="/install" --no-warn-script-location /synapse[all]; \ fi ### @@ -151,20 +151,20 @@ LABEL org.opencontainers.image.source='https://github.com/matrix-org/synapse.git LABEL org.opencontainers.image.licenses='Apache-2.0' RUN \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ + --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update -qq && apt-get install -yqq \ - curl \ - gosu \ - libjpeg62-turbo \ - libpq5 \ - libwebp6 \ - xmlsec1 \ - libjemalloc2 \ - 
libicu67 \ - libssl-dev \ - openssl \ - && rm -rf /var/lib/apt/lists/* + curl \ + gosu \ + libjpeg62-turbo \ + libpq5 \ + libwebp6 \ + xmlsec1 \ + libjemalloc2 \ + libicu67 \ + libssl-dev \ + openssl \ + && rm -rf /var/lib/apt/lists/* COPY --from=builder /install /usr/local COPY ./docker/start.py /start.py @@ -175,4 +175,4 @@ EXPOSE 8008/tcp 8009/tcp 8448/tcp ENTRYPOINT ["/start.py"] HEALTHCHECK --start-period=5s --interval=15s --timeout=5s \ - CMD curl -fSs http://localhost:8008/health || exit 1 + CMD curl -fSs http://localhost:8008/health || exit 1 From 65d03866936adb144631d263a8539a2cb060fd43 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 20 Jan 2023 18:02:18 +0000 Subject: [PATCH 046/344] Always notify replication when a stream advances (#14877) This ensures that all other workers are told about stream updates in a timely manner, without having to remember to manually poke replication. --- changelog.d/14877.misc | 1 + synapse/_scripts/synapse_port_db.py | 4 +++ synapse/notifier.py | 31 ++++++++++++++++--- synapse/server.py | 6 +++- .../storage/databases/main/account_data.py | 2 ++ synapse/storage/databases/main/cache.py | 1 + synapse/storage/databases/main/deviceinbox.py | 3 +- synapse/storage/databases/main/devices.py | 1 + .../storage/databases/main/end_to_end_keys.py | 5 ++- .../storage/databases/main/events_worker.py | 10 +++++- synapse/storage/databases/main/presence.py | 3 +- synapse/storage/databases/main/push_rule.py | 1 + synapse/storage/databases/main/pusher.py | 1 + synapse/storage/databases/main/receipts.py | 2 ++ synapse/storage/databases/main/room.py | 6 +++- synapse/storage/util/id_generators.py | 26 ++++++++++++++-- tests/module_api/test_api.py | 3 ++ tests/replication/tcp/test_handler.py | 23 +++++--------- tests/storage/test_id_generators.py | 4 +++ 19 files changed, 104 insertions(+), 29 deletions(-) create mode 100644 changelog.d/14877.misc diff --git a/changelog.d/14877.misc b/changelog.d/14877.misc new file mode 100644 index 000000000000..4e9c3fa33fdc --- /dev/null +++ b/changelog.d/14877.misc @@ -0,0 +1 @@ +Always notify replication when a stream advances automatically. 
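
The shape of this change is easiest to see as a small, self-contained sketch; the class names here are invented for illustration, and the real implementation is in the diff below. Stream ID generators are handed a notifier and poke it whenever a new position is handed out, so callers no longer have to remember to notify replication themselves.

    from typing import Callable, List

    class ToyReplicationNotifier:
        # Holds callbacks to run whenever any stream advances.
        def __init__(self) -> None:
            self._callbacks: List[Callable[[], None]] = []

        def add_replication_callback(self, cb: Callable[[], None]) -> None:
            self._callbacks.append(cb)

        def notify_replication(self) -> None:
            for cb in self._callbacks:
                cb()

    class ToyStreamIdGenerator:
        def __init__(self, notifier: ToyReplicationNotifier) -> None:
            self._notifier = notifier
            self._current = 0

        def get_next(self) -> int:
            self._current += 1
            # The generator itself announces that the stream advanced, rather
            # than relying on each caller to poke replication afterwards.
            self._notifier.notify_replication()
            return self._current

    notifier = ToyReplicationNotifier()
    notifier.add_replication_callback(lambda: print("stream advanced"))
    ToyStreamIdGenerator(notifier).get_next()  # prints "stream advanced"
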
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index c463b60b2620..5e137dbbf711 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -51,6 +51,7 @@ make_deferred_yieldable, run_in_background, ) +from synapse.notifier import ReplicationNotifier from synapse.storage.database import DatabasePool, LoggingTransaction, make_conn from synapse.storage.databases.main import PushRuleStore from synapse.storage.databases.main.account_data import AccountDataWorkerStore @@ -260,6 +261,9 @@ def get_instance_name(self) -> str: def should_send_federation(self) -> bool: return False + def get_replication_notifier(self) -> ReplicationNotifier: + return ReplicationNotifier() + class Porter: def __init__( diff --git a/synapse/notifier.py b/synapse/notifier.py index 26b97cf766c3..28f0d4a25afe 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -226,8 +226,7 @@ def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main self.pending_new_room_events: List[_PendingRoomEventEntry] = [] - # Called when there are new things to stream over replication - self.replication_callbacks: List[Callable[[], None]] = [] + self._replication_notifier = hs.get_replication_notifier() self._new_join_in_room_callbacks: List[Callable[[str, str], None]] = [] self._federation_client = hs.get_federation_http_client() @@ -279,7 +278,7 @@ def add_replication_callback(self, cb: Callable[[], None]) -> None: it needs to do any asynchronous work, a background thread should be started and wrapped with run_as_background_process. """ - self.replication_callbacks.append(cb) + self._replication_notifier.add_replication_callback(cb) def add_new_join_in_room_callback(self, cb: Callable[[str, str], None]) -> None: """Add a callback that will be called when a user joins a room. @@ -741,8 +740,7 @@ def _user_joined_room(self, user_id: str, room_id: str) -> None: def notify_replication(self) -> None: """Notify the any replication listeners that there's a new event""" - for cb in self.replication_callbacks: - cb() + self._replication_notifier.notify_replication() def notify_user_joined_room(self, event_id: str, room_id: str) -> None: for cb in self._new_join_in_room_callbacks: @@ -759,3 +757,26 @@ def notify_remote_server_up(self, server: str) -> None: # Tell the federation client about the fact the server is back up, so # that any in flight requests can be immediately retried. self._federation_client.wake_destination(server) + + +@attr.s(auto_attribs=True) +class ReplicationNotifier: + """Tracks callbacks for things that need to know about stream changes. + + This is separate from the notifier to avoid circular dependencies. + """ + + _replication_callbacks: List[Callable[[], None]] = attr.Factory(list) + + def add_replication_callback(self, cb: Callable[[], None]) -> None: + """Add a callback that will be called when some new data is available. + Callback is not given any arguments. It should *not* return a Deferred - if + it needs to do any asynchronous work, a background thread should be started and + wrapped with run_as_background_process. 
+ """ + self._replication_callbacks.append(cb) + + def notify_replication(self) -> None: + """Notify the any replication listeners that there's a new event""" + for cb in self._replication_callbacks: + cb() diff --git a/synapse/server.py b/synapse/server.py index f4ab94c4f33c..9d6d268f490c 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -107,7 +107,7 @@ from synapse.http.matrixfederationclient import MatrixFederationHttpClient from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi -from synapse.notifier import Notifier +from synapse.notifier import Notifier, ReplicationNotifier from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator from synapse.push.pusherpool import PusherPool from synapse.replication.tcp.client import ReplicationDataHandler @@ -389,6 +389,10 @@ def get_federation_server(self) -> FederationServer: def get_notifier(self) -> Notifier: return Notifier(self) + @cache_in_self + def get_replication_notifier(self) -> ReplicationNotifier: + return ReplicationNotifier() + @cache_in_self def get_auth(self) -> Auth: return Auth(self) diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 881d7089dbb2..8a359d7eb89c 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -75,6 +75,7 @@ def __init__( self._account_data_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="account_data", instance_name=self._instance_name, tables=[ @@ -95,6 +96,7 @@ def __init__( # SQLite). self._account_data_id_gen = StreamIdGenerator( db_conn, + hs.get_replication_notifier(), "room_account_data", "stream_id", extra_tables=[("room_tags_revisions", "stream_id")], diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 2179a8bf5922..5b6643169139 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -75,6 +75,7 @@ def __init__( self._cache_id_gen = MultiWriterIdGenerator( db_conn, database, + notifier=hs.get_replication_notifier(), stream_name="caches", instance_name=hs.get_instance_name(), tables=[ diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 713be91c5dd8..8e61aba4548d 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -91,6 +91,7 @@ def __init__( MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="to_device", instance_name=self._instance_name, tables=[("device_inbox", "instance_name", "stream_id")], @@ -101,7 +102,7 @@ def __init__( else: self._can_write_to_device = True self._device_inbox_id_gen = StreamIdGenerator( - db_conn, "device_inbox", "stream_id" + db_conn, hs.get_replication_notifier(), "device_inbox", "stream_id" ) max_device_inbox_id = self._device_inbox_id_gen.get_current_token() diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index cd186c84726c..903606fb4664 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -92,6 +92,7 @@ def __init__( # class below that is used on the main process. 
self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( db_conn, + hs.get_replication_notifier(), "device_lists_stream", "stream_id", extra_tables=[ diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 4c691642e2b5..c4ac6c33ba54 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1181,7 +1181,10 @@ def __init__( super().__init__(database, db_conn, hs) self._cross_signing_id_gen = StreamIdGenerator( - db_conn, "e2e_cross_signing_keys", "stream_id" + db_conn, + hs.get_replication_notifier(), + "e2e_cross_signing_keys", + "stream_id", ) async def set_e2e_device_keys( diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index d150fa8a943d..d8a8bcafb6ce 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -191,6 +191,7 @@ def __init__( self._stream_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="events", instance_name=hs.get_instance_name(), tables=[("events", "instance_name", "stream_ordering")], @@ -200,6 +201,7 @@ def __init__( self._backfill_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="backfill", instance_name=hs.get_instance_name(), tables=[("events", "instance_name", "stream_ordering")], @@ -217,12 +219,14 @@ def __init__( # SQLite). self._stream_id_gen = StreamIdGenerator( db_conn, + hs.get_replication_notifier(), "events", "stream_ordering", is_writer=hs.get_instance_name() in hs.config.worker.writers.events, ) self._backfill_id_gen = StreamIdGenerator( db_conn, + hs.get_replication_notifier(), "events", "stream_ordering", step=-1, @@ -300,6 +304,7 @@ def get_chain_id_txn(txn: Cursor) -> int: self._un_partial_stated_events_stream_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="un_partial_stated_event_stream", instance_name=hs.get_instance_name(), tables=[ @@ -311,7 +316,10 @@ def get_chain_id_txn(txn: Cursor) -> int: ) else: self._un_partial_stated_events_stream_id_gen = StreamIdGenerator( - db_conn, "un_partial_stated_event_stream", "stream_id" + db_conn, + hs.get_replication_notifier(), + "un_partial_stated_event_stream", + "stream_id", ) def get_un_partial_stated_events_token(self) -> int: diff --git a/synapse/storage/databases/main/presence.py b/synapse/storage/databases/main/presence.py index 7b60815043a6..beb210f8eefd 100644 --- a/synapse/storage/databases/main/presence.py +++ b/synapse/storage/databases/main/presence.py @@ -77,6 +77,7 @@ def __init__( self._presence_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="presence_stream", instance_name=self._instance_name, tables=[("presence_stream", "instance_name", "stream_id")], @@ -85,7 +86,7 @@ def __init__( ) else: self._presence_id_gen = StreamIdGenerator( - db_conn, "presence_stream", "stream_id" + db_conn, hs.get_replication_notifier(), "presence_stream", "stream_id" ) self.hs = hs diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 03182887d138..14ca167b34b4 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -118,6 +118,7 @@ def __init__( # class below that is used 
on the main process. self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( db_conn, + hs.get_replication_notifier(), "push_rules_stream", "stream_id", is_writer=hs.config.worker.worker_app is None, diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 7f24a3b6ec5e..df53e726e62a 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -62,6 +62,7 @@ def __init__( # class below that is used on the main process. self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( db_conn, + hs.get_replication_notifier(), "pushers", "id", extra_tables=[("deleted_pushers", "stream_id")], diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 86f5bce5f08d..3468f354e60f 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -73,6 +73,7 @@ def __init__( self._receipts_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="receipts", instance_name=self._instance_name, tables=[("receipts_linearized", "instance_name", "stream_id")], @@ -91,6 +92,7 @@ def __init__( # SQLite). self._receipts_id_gen = StreamIdGenerator( db_conn, + hs.get_replication_notifier(), "receipts_linearized", "stream_id", is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts, diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 78906a5e1d9e..7264a33cd4ac 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -126,6 +126,7 @@ def __init__( self._un_partial_stated_rooms_stream_id_gen = MultiWriterIdGenerator( db_conn=db_conn, db=database, + notifier=hs.get_replication_notifier(), stream_name="un_partial_stated_room_stream", instance_name=self._instance_name, tables=[ @@ -137,7 +138,10 @@ def __init__( ) else: self._un_partial_stated_rooms_stream_id_gen = StreamIdGenerator( - db_conn, "un_partial_stated_room_stream", "stream_id" + db_conn, + hs.get_replication_notifier(), + "un_partial_stated_room_stream", + "stream_id", ) async def store_room( diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 8670ffbfa374..9adff3f4f523 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -20,6 +20,7 @@ from contextlib import contextmanager from types import TracebackType from typing import ( + TYPE_CHECKING, AsyncContextManager, ContextManager, Dict, @@ -49,6 +50,9 @@ from synapse.storage.types import Cursor from synapse.storage.util.sequence import PostgresSequenceGenerator +if TYPE_CHECKING: + from synapse.notifier import ReplicationNotifier + logger = logging.getLogger(__name__) @@ -182,6 +186,7 @@ class StreamIdGenerator(AbstractStreamIdGenerator): def __init__( self, db_conn: LoggingDatabaseConnection, + notifier: "ReplicationNotifier", table: str, column: str, extra_tables: Iterable[Tuple[str, str]] = (), @@ -205,6 +210,8 @@ def __init__( # The key and values are the same, but we never look at the values. 
self._unfinished_ids: OrderedDict[int, int] = OrderedDict() + self._notifier = notifier + def advance(self, instance_name: str, new_id: int) -> None: # Advance should never be called on a writer instance, only over replication if self._is_writer: @@ -227,6 +234,8 @@ def manager() -> Generator[int, None, None]: with self._lock: self._unfinished_ids.pop(next_id) + self._notifier.notify_replication() + return _AsyncCtxManagerWrapper(manager()) def get_next_mult(self, n: int) -> AsyncContextManager[Sequence[int]]: @@ -250,6 +259,8 @@ def manager() -> Generator[Sequence[int], None, None]: for next_id in next_ids: self._unfinished_ids.pop(next_id) + self._notifier.notify_replication() + return _AsyncCtxManagerWrapper(manager()) def get_current_token(self) -> int: @@ -296,6 +307,7 @@ def __init__( self, db_conn: LoggingDatabaseConnection, db: DatabasePool, + notifier: "ReplicationNotifier", stream_name: str, instance_name: str, tables: List[Tuple[str, str, str]], @@ -304,6 +316,7 @@ def __init__( positive: bool = True, ) -> None: self._db = db + self._notifier = notifier self._stream_name = stream_name self._instance_name = instance_name self._positive = positive @@ -535,7 +548,9 @@ def get_next(self) -> AsyncContextManager[int]: # Cast safety: the second argument to _MultiWriterCtxManager, multiple_ids, # controls the return type. If `None` or omitted, the context manager yields # a single integer stream_id; otherwise it yields a list of stream_ids. - return cast(AsyncContextManager[int], _MultiWriterCtxManager(self)) + return cast( + AsyncContextManager[int], _MultiWriterCtxManager(self, self._notifier) + ) def get_next_mult(self, n: int) -> AsyncContextManager[List[int]]: # If we have a list of instances that are allowed to write to this @@ -544,7 +559,10 @@ def get_next_mult(self, n: int) -> AsyncContextManager[List[int]]: raise Exception("Tried to allocate stream ID on non-writer") # Cast safety: see get_next. - return cast(AsyncContextManager[List[int]], _MultiWriterCtxManager(self, n)) + return cast( + AsyncContextManager[List[int]], + _MultiWriterCtxManager(self, self._notifier, n), + ) def get_next_txn(self, txn: LoggingTransaction) -> int: """ @@ -563,6 +581,7 @@ def get_next_txn(self, txn: LoggingTransaction) -> int: txn.call_after(self._mark_id_as_finished, next_id) txn.call_on_exception(self._mark_id_as_finished, next_id) + txn.call_after(self._notifier.notify_replication) # Update the `stream_positions` table with newly updated stream # ID (unless self._writers is not set in which case we don't @@ -787,6 +806,7 @@ class _MultiWriterCtxManager: """Async context manager returned by MultiWriterIdGenerator""" id_gen: MultiWriterIdGenerator + notifier: "ReplicationNotifier" multiple_ids: Optional[int] = None stream_ids: List[int] = attr.Factory(list) @@ -814,6 +834,8 @@ async def __aexit__( for i in self.stream_ids: self.id_gen._mark_id_as_finished(i) + self.notifier.notify_replication() + if exc_type is not None: return False diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 9919938e8071..8f88c0117d78 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -404,6 +404,9 @@ def test_send_local_online_presence_to_federation(self): self.module_api.send_local_online_presence_to([remote_user_id]) ) + # We don't always send out federation immediately, so we advance the clock. 
+ self.reactor.advance(1000) + # Check that a presence update was sent as part of a federation transaction found_update = False calls = ( diff --git a/tests/replication/tcp/test_handler.py b/tests/replication/tcp/test_handler.py index 555922409d13..6e4055cc2102 100644 --- a/tests/replication/tcp/test_handler.py +++ b/tests/replication/tcp/test_handler.py @@ -14,7 +14,7 @@ from twisted.internet import defer -from synapse.replication.tcp.commands import PositionCommand, RdataCommand +from synapse.replication.tcp.commands import PositionCommand from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -111,20 +111,14 @@ def test_wait_for_stream_position(self) -> None: next_token = self.get_success(ctx.__aenter__()) self.get_success(ctx.__aexit__(None, None, None)) - cmd_handler.send_command( - RdataCommand("caches", "worker1", next_token, ("func_name", [], 0)) - ) - self.replicate() - self.get_success( data_handler.wait_for_stream_position("worker1", "caches", next_token) ) - # `wait_for_stream_position` should only return once master receives an - # RDATA from the worker - ctx = cache_id_gen.get_next() - next_token = self.get_success(ctx.__aenter__()) - self.get_success(ctx.__aexit__(None, None, None)) + # `wait_for_stream_position` should only return once master receives a + # notification that `next_token` has persisted. + ctx_worker1 = cache_id_gen.get_next() + next_token = self.get_success(ctx_worker1.__aenter__()) d = defer.ensureDeferred( data_handler.wait_for_stream_position("worker1", "caches", next_token) @@ -142,10 +136,7 @@ def test_wait_for_stream_position(self) -> None: ) self.assertFalse(d.called) - # ... but receiving the RDATA should - cmd_handler.send_command( - RdataCommand("caches", "worker1", next_token, ("func_name", [], 0)) - ) - self.replicate() + # ... but worker1 finishing (and so sending an update) should. 
+ self.get_success(ctx_worker1.__aexit__(None, None, None)) self.assertTrue(d.called) diff --git a/tests/storage/test_id_generators.py b/tests/storage/test_id_generators.py index ff9691c518bc..9174fb096470 100644 --- a/tests/storage/test_id_generators.py +++ b/tests/storage/test_id_generators.py @@ -52,6 +52,7 @@ def _create_id_generator(self) -> StreamIdGenerator: def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator: return StreamIdGenerator( db_conn=conn, + notifier=self.hs.get_replication_notifier(), table="foobar", column="stream_id", ) @@ -196,6 +197,7 @@ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: return MultiWriterIdGenerator( conn, self.db_pool, + notifier=self.hs.get_replication_notifier(), stream_name="test_stream", instance_name=instance_name, tables=[("foobar", "instance_name", "stream_id")], @@ -630,6 +632,7 @@ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: return MultiWriterIdGenerator( conn, self.db_pool, + notifier=self.hs.get_replication_notifier(), stream_name="test_stream", instance_name=instance_name, tables=[("foobar", "instance_name", "stream_id")], @@ -766,6 +769,7 @@ def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: return MultiWriterIdGenerator( conn, self.db_pool, + notifier=self.hs.get_replication_notifier(), stream_name="test_stream", instance_name=instance_name, tables=[ From 0ec12a37538d0df07d96cfc9cf5f5208f7453607 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 20 Jan 2023 21:04:33 +0000 Subject: [PATCH 047/344] Reduce max time we wait for stream positions (#14881) Now that we wait for stream positions whenever we do a HTTP replication hit, we need to be less brutal in the case where we do timeout (as we have bugs around this). --- changelog.d/14881.misc | 1 + synapse/replication/http/_base.py | 2 -- synapse/replication/tcp/client.py | 21 +++++++++++---------- 3 files changed, 12 insertions(+), 12 deletions(-) create mode 100644 changelog.d/14881.misc diff --git a/changelog.d/14881.misc b/changelog.d/14881.misc new file mode 100644 index 000000000000..be89d092b6a0 --- /dev/null +++ b/changelog.d/14881.misc @@ -0,0 +1 @@ +Reduce max time we wait for stream positions. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 709327b97fb4..908f3f1db7da 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -352,7 +352,6 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: instance_name=instance_name, stream_name=stream_name, position=position, - raise_on_timeout=False, ) return result @@ -414,7 +413,6 @@ async def _check_auth_and_handle( instance_name=content[_STREAM_POSITION_KEY]["instance_name"], stream_name=stream_name, position=position, - raise_on_timeout=False, ) if self.CACHE: diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 6e242c57493e..493f61667999 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -59,7 +59,7 @@ logger = logging.getLogger(__name__) # How long we allow callers to wait for replication updates before timing out. 
-_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 30 +_WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5 class DirectTcpReplicationClientFactory(ReconnectingClientFactory): @@ -326,7 +326,6 @@ async def wait_for_stream_position( instance_name: str, stream_name: str, position: int, - raise_on_timeout: bool = True, ) -> None: """Wait until this instance has received updates up to and including the given stream position. @@ -335,8 +334,6 @@ async def wait_for_stream_position( instance_name stream_name position - raise_on_timeout: Whether to raise an exception if we time out - waiting for the updates, or if we log an error and return. """ if instance_name == self._instance_name: @@ -365,19 +362,23 @@ async def wait_for_stream_position( # We measure here to get in flight counts and average waiting time. with Measure(self._clock, "repl.wait_for_stream_position"): - logger.info("Waiting for repl stream %r to reach %s", stream_name, position) + logger.info( + "Waiting for repl stream %r to reach %s (%s)", + stream_name, + position, + instance_name, + ) try: await make_deferred_yieldable(deferred) except defer.TimeoutError: logger.error("Timed out waiting for stream %s", stream_name) - - if raise_on_timeout: - raise - return logger.info( - "Finished waiting for repl stream %r to reach %s", stream_name, position + "Finished waiting for repl stream %r to reach %s (%s)", + stream_name, + position, + instance_name, ) def stop_pusher(self, user_id: str, app_id: str, pushkey: str) -> None: From 8d90e5f2006c4b9bad4b7b4bc164103480886da2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Sat, 21 Jan 2023 15:59:15 +0000 Subject: [PATCH 048/344] Add type hints to `TestRatelimiter` (#14885) --- changelog.d/14885.misc | 1 + mypy.ini | 1 - tests/api/test_ratelimiting.py | 66 ++++++++++++++++++++++++++-------- 3 files changed, 52 insertions(+), 16 deletions(-) create mode 100644 changelog.d/14885.misc diff --git a/changelog.d/14885.misc b/changelog.d/14885.misc new file mode 100644 index 000000000000..9f5384e60e78 --- /dev/null +++ b/changelog.d/14885.misc @@ -0,0 +1 @@ +Add missing type hints. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 468bfe588ccc..3bbf62c95256 100644 --- a/mypy.ini +++ b/mypy.ini @@ -33,7 +33,6 @@ exclude = (?x) |synapse/storage/schema/ |tests/api/test_auth.py - |tests/api/test_ratelimiting.py |tests/app/test_openid_listener.py |tests/appservice/test_scheduler.py |tests/events/test_presence_router.py diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index c86f783c5bd4..b5fd08d43711 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -8,7 +8,10 @@ class TestRatelimiter(unittest.HomeserverTestCase): def test_allowed_via_can_do_action(self): limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=1, ) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(None, key="test_id", _time_now_s=0) @@ -30,7 +33,7 @@ def test_allowed_via_can_do_action(self): def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): appservice = ApplicationService( - None, + token="fake_token", id="foo", rate_limited=True, sender="@as:example.com", @@ -38,7 +41,10 @@ def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): as_requester = create_requester("@user:example.com", app_service=appservice) limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=1, ) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=0) @@ -60,7 +66,7 @@ def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): def test_allowed_appservice_via_can_requester_do_action(self): appservice = ApplicationService( - None, + token="fake_token", id="foo", rate_limited=False, sender="@as:example.com", @@ -68,7 +74,10 @@ def test_allowed_appservice_via_can_requester_do_action(self): as_requester = create_requester("@user:example.com", app_service=appservice) limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=1, ) allowed, time_allowed = self.get_success_or_raise( limiter.can_do_action(as_requester, _time_now_s=0) @@ -90,7 +99,10 @@ def test_allowed_appservice_via_can_requester_do_action(self): def test_allowed_via_ratelimit(self): limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=1, ) # Shouldn't raise @@ -114,7 +126,10 @@ def test_allowed_via_can_do_action_and_overriding_parameters(self): """ # Create a Ratelimiter with a very low allowed rate_hz and burst_count limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=1, ) # First attempt should be allowed @@ -160,7 +175,10 @@ def test_allowed_via_ratelimit_and_overriding_parameters(self): """ # Create a Ratelimiter with a very low allowed rate_hz and burst_count limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=1, ) # First attempt should be allowed @@ -188,7 +206,10 @@ def 
test_allowed_via_ratelimit_and_overriding_parameters(self): def test_pruning(self): limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=1 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=1, ) self.get_success_or_raise( limiter.can_do_action(None, key="test_id_1", _time_now_s=0) @@ -223,7 +244,7 @@ def test_db_user_override(self): ) ) - limiter = Ratelimiter(store=store, clock=None, rate_hz=0.1, burst_count=1) + limiter = Ratelimiter(store=store, clock=self.clock, rate_hz=0.1, burst_count=1) # Shouldn't raise for _ in range(20): @@ -231,7 +252,10 @@ def test_db_user_override(self): def test_multiple_actions(self): limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=3, ) # Test that 4 actions aren't allowed with a maximum burst of 3. allowed, time_allowed = self.get_success_or_raise( @@ -295,7 +319,10 @@ def test_rate_limit_burst_only_given_once(self) -> None: extra tokens by timing requests. """ limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=3, ) def consume_at(time: float) -> bool: @@ -317,7 +344,10 @@ def consume_at(time: float) -> bool: def test_record_action_which_doesnt_fill_bucket(self) -> None: limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=3, ) # Observe two actions, leaving room in the bucket for one more. @@ -337,7 +367,10 @@ def test_record_action_which_doesnt_fill_bucket(self) -> None: def test_record_action_which_fills_bucket(self) -> None: limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=3, ) # Observe three actions, filling up the bucket. @@ -363,7 +396,10 @@ def test_record_action_which_fills_bucket(self) -> None: def test_record_action_which_overfills_bucket(self) -> None: limiter = Ratelimiter( - store=self.hs.get_datastores().main, clock=None, rate_hz=0.1, burst_count=3 + store=self.hs.get_datastores().main, + clock=self.clock, + rate_hz=0.1, + burst_count=3, ) # Observe four actions, exceeding the bucket. From f075f6ae2bfb87495283128865d535466e86bfa1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Sun, 22 Jan 2023 09:50:14 +0000 Subject: [PATCH 049/344] Fix type hints for Monthly Active Users tests (#14889) --- changelog.d/14889.misc | 1 + mypy.ini | 1 - .../test_resource_limits_server_notices.py | 13 +++++++------ 3 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/14889.misc diff --git a/changelog.d/14889.misc b/changelog.d/14889.misc new file mode 100644 index 000000000000..9f5384e60e78 --- /dev/null +++ b/changelog.d/14889.misc @@ -0,0 +1 @@ +Add missing type hints. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 3bbf62c95256..63366dad99cc 100644 --- a/mypy.ini +++ b/mypy.ini @@ -50,7 +50,6 @@ exclude = (?x) |tests/rest/client/test_transactions.py |tests/rest/media/v1/test_media_storage.py |tests/server.py - |tests/server_notices/test_resource_limits_server_notices.py |tests/test_state.py |tests/test_terms_auth.py )$ diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 7cbc40736c59..dadc6efcbf75 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -69,7 +69,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=make_awaitable(1000) ) - self._rlsn._server_notices_manager.send_notice = Mock( + self._rlsn._server_notices_manager.send_notice = Mock( # type: ignore[assignment] return_value=make_awaitable(Mock()) ) self._send_notice = self._rlsn._server_notices_manager.send_notice @@ -82,8 +82,8 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self._rlsn._server_notices_manager.maybe_get_notice_room_for_user = Mock( return_value=make_awaitable("!something:localhost") ) - self._rlsn._store.add_tag_to_room = Mock(return_value=make_awaitable(None)) - self._rlsn._store.get_tags_for_room = Mock(return_value=make_awaitable({})) + self._rlsn._store.add_tag_to_room = Mock(return_value=make_awaitable(None)) # type: ignore[assignment] + self._rlsn._store.get_tags_for_room = Mock(return_value=make_awaitable({})) # type: ignore[assignment] @override_config({"hs_disabled": True}) def test_maybe_send_server_notice_disabled_hs(self): @@ -361,9 +361,10 @@ def _trigger_notice_and_join(self) -> Tuple[str, str, str]: tok: The access token of the user that joined the room. room_id: The ID of the room that's been joined. """ - user_id = None - tok = None - invites = [] + # We need at least one user to process + self.assertGreater(self.hs.config.server.max_mau_value, 0) + + invites = {} # Register as many users as the MAU limit allows. for i in range(self.hs.config.server.max_mau_value): From d329a566df6ff2b635a375bf1b2c8ed3b2c9815d Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Sun, 22 Jan 2023 19:19:31 +0000 Subject: [PATCH 050/344] Faster joins: Fix incompatibility with restricted joins (#14882) * Avoid clearing out forward extremities when doing a second remote join When joining a restricted room where the local homeserver does not have a user able to issue invites, we perform a second remote join. We want to avoid clearing out forward extremities in this case because the forward extremities we have are up to date and clearing out forward extremities creates a window in which the room can get bricked if Synapse crashes. Signed-off-by: Sean Quah * Do a full join when doing a second remote join into a full state room We cannot persist a partial state join event into a joined full state room, so we perform a full state join for such rooms instead. As a future optimization, we could always perform a partial state join and compute or retrieve the full state ourselves if necessary. 
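A rough sketch of the join-mode decision described above (illustrative only; `is_host_joined` and `already_partial_state_room` mirror the variables introduced in the federation handler diff below, and the helper name is made up):

    def should_request_partial_state(
        is_host_joined: bool, already_partial_state_room: bool
    ) -> bool:
        # A partial state join event cannot be persisted into a room that is
        # already joined with full state, so only ask the resident server for
        # partial state if we are not currently joined, or if the room is
        # already marked as partial-stated.
        return (not is_host_joined) or already_partial_state_room

In other words, a second remote join into a fully-stated room falls back to a full state join, while a re-join into a still-partial room keeps requesting partial state.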
Signed-off-by: Sean Quah * Add lock around partial state flag for rooms Signed-off-by: Sean Quah * Preserve partial state info when doing a second partial state join Signed-off-by: Sean Quah * Add newsfile * Add a TODO(faster_joins) marker Signed-off-by: Sean Quah --- changelog.d/14882.bugfix | 1 + synapse/federation/federation_client.py | 5 + synapse/handlers/federation.py | 215 +++++++++++++++--------- 3 files changed, 140 insertions(+), 81 deletions(-) create mode 100644 changelog.d/14882.bugfix diff --git a/changelog.d/14882.bugfix b/changelog.d/14882.bugfix new file mode 100644 index 000000000000..1fda344361b5 --- /dev/null +++ b/changelog.d/14882.bugfix @@ -0,0 +1 @@ +Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 15a9a8830271..f185b6c1f9a4 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1157,6 +1157,11 @@ async def _execute(pdu: EventBase) -> None: "members_omitted was set, but no servers were listed in the room" ) + if response.members_omitted and not partial_state: + raise InvalidResponseError( + "members_omitted was set, but we asked for full state" + ) + return SendJoinResult( event=event, state=signed_state, diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index e386f77de6d9..2123ace8a6cc 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -48,7 +48,6 @@ FederationError, FederationPullAttemptBackoffError, HttpResponseException, - LimitExceededError, NotFoundError, RequestSendFailed, SynapseError, @@ -182,6 +181,12 @@ def __init__(self, hs: "HomeServer"): self._partial_state_syncs_maybe_needing_restart: Dict[ str, Tuple[Optional[str], Collection[str]] ] = {} + # A lock guarding the partial state flag for rooms. + # When the lock is held for a given room, no other concurrent code may + # partial state or un-partial state the room. + self._is_partial_state_room_linearizer = Linearizer( + name="_is_partial_state_room_linearizer" + ) # if this is the main process, fire off a background process to resume # any partial-state-resync operations which were in flight when we @@ -599,7 +604,23 @@ async def do_invite_join( self._federation_event_handler.room_queues[room_id] = [] - await self._clean_room_for_join(room_id) + is_host_joined = await self.store.is_host_joined(room_id, self.server_name) + + if not is_host_joined: + # We may have old forward extremities lying around if the homeserver left + # the room completely in the past. Clear them out. + # + # Note that this check-then-clear is subject to races where + # * the homeserver is in the room and stops being in the room just after + # the check. We won't reset the forward extremities, but that's okay, + # since they will be almost up to date. + # * the homeserver is not in the room and starts being in the room just + # after the check. This can't happen, since `RoomMemberHandler` has a + # linearizer lock which prevents concurrent remote joins into the same + # room. + # In short, the races either have an acceptable outcome or should be + # impossible. 
+ await self._clean_room_for_join(room_id) try: # Try the host we successfully got a response to /make_join/ @@ -611,91 +632,115 @@ async def do_invite_join( except ValueError: pass - ret = await self.federation_client.send_join( - host_list, event, room_version_obj - ) - - event = ret.event - origin = ret.origin - state = ret.state - auth_chain = ret.auth_chain - auth_chain.sort(key=lambda e: e.depth) - - logger.debug("do_invite_join auth_chain: %s", auth_chain) - logger.debug("do_invite_join state: %s", state) - - logger.debug("do_invite_join event: %s", event) + async with self._is_partial_state_room_linearizer.queue(room_id): + already_partial_state_room = await self.store.is_partial_state_room( + room_id + ) - # if this is the first time we've joined this room, it's time to add - # a row to `rooms` with the correct room version. If there's already a - # row there, we should override it, since it may have been populated - # based on an invite request which lied about the room version. - # - # federation_client.send_join has already checked that the room - # version in the received create event is the same as room_version_obj, - # so we can rely on it now. - # - await self.store.upsert_room_on_join( - room_id=room_id, - room_version=room_version_obj, - state_events=state, - ) + ret = await self.federation_client.send_join( + host_list, + event, + room_version_obj, + # Perform a full join when we are already in the room and it is a + # full state room, since we are not allowed to persist a partial + # state join event in a full state room. In the future, we could + # optimize this by always performing a partial state join and + # computing the state ourselves or retrieving it from the remote + # homeserver if necessary. + # + # There's a race where we leave the room, then perform a full join + # anyway. This should end up being fast anyway, since we would + # already have the full room state and auth chain persisted. + partial_state=not is_host_joined or already_partial_state_room, + ) - if ret.partial_state: - # Mark the room as having partial state. - # The background process is responsible for unmarking this flag, - # even if the join fails. - await self.store.store_partial_state_room( + event = ret.event + origin = ret.origin + state = ret.state + auth_chain = ret.auth_chain + auth_chain.sort(key=lambda e: e.depth) + + logger.debug("do_invite_join auth_chain: %s", auth_chain) + logger.debug("do_invite_join state: %s", state) + + logger.debug("do_invite_join event: %s", event) + + # if this is the first time we've joined this room, it's time to add + # a row to `rooms` with the correct room version. If there's already a + # row there, we should override it, since it may have been populated + # based on an invite request which lied about the room version. + # + # federation_client.send_join has already checked that the room + # version in the received create event is the same as room_version_obj, + # so we can rely on it now. + # + await self.store.upsert_room_on_join( room_id=room_id, - servers=ret.servers_in_room, - device_lists_stream_id=self.store.get_device_stream_token(), - joined_via=origin, + room_version=room_version_obj, + state_events=state, ) - try: - max_stream_id = ( - await self._federation_event_handler.process_remote_join( - origin, - room_id, - auth_chain, - state, - event, - room_version_obj, - partial_state=ret.partial_state, + if ret.partial_state and not already_partial_state_room: + # Mark the room as having partial state. 
+ # The background process is responsible for unmarking this flag, + # even if the join fails. + # TODO(faster_joins): + # We may want to reset the partial state info if it's from an + # old, failed partial state join. + # https://github.com/matrix-org/synapse/issues/13000 + await self.store.store_partial_state_room( + room_id=room_id, + servers=ret.servers_in_room, + device_lists_stream_id=self.store.get_device_stream_token(), + joined_via=origin, ) - ) - except PartialStateConflictError as e: - # The homeserver was already in the room and it is no longer partial - # stated. We ought to be doing a local join instead. Turn the error into - # a 429, as a hint to the client to try again. - # TODO(faster_joins): `_should_perform_remote_join` suggests that we may - # do a remote join for restricted rooms even if we have full state. - logger.error( - "Room %s was un-partial stated while processing remote join.", - room_id, - ) - raise LimitExceededError(msg=e.msg, errcode=e.errcode, retry_after_ms=0) - else: - # Record the join event id for future use (when we finish the full - # join). We have to do this after persisting the event to keep foreign - # key constraints intact. - if ret.partial_state: - await self.store.write_partial_state_rooms_join_event_id( - room_id, event.event_id + + try: + max_stream_id = ( + await self._federation_event_handler.process_remote_join( + origin, + room_id, + auth_chain, + state, + event, + room_version_obj, + partial_state=ret.partial_state, + ) ) - finally: - # Always kick off the background process that asynchronously fetches - # state for the room. - # If the join failed, the background process is responsible for - # cleaning up — including unmarking the room as a partial state room. - if ret.partial_state: - # Kick off the process of asynchronously fetching the state for this - # room. - self._start_partial_state_room_sync( - initial_destination=origin, - other_destinations=ret.servers_in_room, - room_id=room_id, + except PartialStateConflictError: + # This should be impossible, since we hold the lock on the room's + # partial statedness. + logger.error( + "Room %s was un-partial stated while processing remote join.", + room_id, ) + raise + else: + # Record the join event id for future use (when we finish the full + # join). We have to do this after persisting the event to keep + # foreign key constraints intact. + if ret.partial_state and not already_partial_state_room: + # TODO(faster_joins): + # We may want to reset the partial state info if it's from + # an old, failed partial state join. + # https://github.com/matrix-org/synapse/issues/13000 + await self.store.write_partial_state_rooms_join_event_id( + room_id, event.event_id + ) + finally: + # Always kick off the background process that asynchronously fetches + # state for the room. + # If the join failed, the background process is responsible for + # cleaning up — including unmarking the room as a partial state + # room. + if ret.partial_state: + # Kick off the process of asynchronously fetching the state for + # this room. + self._start_partial_state_room_sync( + initial_destination=origin, + other_destinations=ret.servers_in_room, + room_id=room_id, + ) # We wait here until this instance has seen the events come down # replication (if we're using replication) as the below uses caches. @@ -1778,6 +1823,12 @@ async def _sync_partial_state_room( `initial_destination` is unavailable room_id: room to be resynced """ + # Assume that we run on the main process for now. 
+ # TODO(faster_joins,multiple workers) + # When moving the sync to workers, we need to ensure that + # * `_start_partial_state_room_sync` still prevents duplicate resyncs + # * `_is_partial_state_room_linearizer` correctly guards partial state flags + # for rooms between the workers doing remote joins and resync. assert not self.config.worker.worker_app # TODO(faster_joins): do we need to lock to avoid races? What happens if other @@ -1815,8 +1866,10 @@ async def _sync_partial_state_room( logger.info("Handling any pending device list updates") await self._device_handler.handle_room_un_partial_stated(room_id) - logger.info("Clearing partial-state flag for %s", room_id) - success = await self.store.clear_partial_state_room(room_id) + async with self._is_partial_state_room_linearizer.queue(room_id): + logger.info("Clearing partial-state flag for %s", room_id) + success = await self.store.clear_partial_state_room(room_id) + if success: logger.info("State resync complete for %s", room_id) self._storage_controllers.state.notify_room_un_partial_stated( From 22cc93afe38d34c859d8863a99996e7e72ca1733 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Sun, 22 Jan 2023 21:10:11 +0000 Subject: [PATCH 051/344] Enable Faster Remote Room Joins against worker-mode Synapse. (#14752) * Enable Complement tests for Faster Remote Room Joins on worker-mode * (dangerous) Add an override to allow Complement to use FRRJ under workers * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Fix race where we didn't send out replication notification * MORE HACKS * Fix get_un_partial_stated_rooms_token to take instance_name * Fix bad merge * Remove warning * Correctly advance un_partial_stated_room_stream * Fix merge * Add another notify_replication * Fixups * Create a separate ReplicationNotifier * Fix test * Fix portdb * Create a separate ReplicationNotifier * Fix test * Fix portdb * Fix presence test * Newsfile * Apply suggestions from code review * Update changelog.d/14752.misc Co-authored-by: Erik Johnston * lint Signed-off-by: Olivier Wilkinson (reivilibre) Co-authored-by: Erik Johnston --- changelog.d/14752.misc | 1 + .../conf/workers-shared-extra.yaml.j2 | 2 -- scripts-dev/complement.sh | 11 ++++------- synapse/app/generic_worker.py | 7 ------- synapse/handlers/device.py | 2 ++ synapse/handlers/federation.py | 7 ++++--- .../replication/tcp/streams/partial_state.py | 7 ++----- .../storage/databases/main/events_worker.py | 13 ++++++++----- synapse/storage/databases/main/room.py | 19 ++++++++++++------- synapse/storage/databases/main/state.py | 2 ++ 10 files changed, 35 insertions(+), 36 deletions(-) create mode 100644 changelog.d/14752.misc diff --git a/changelog.d/14752.misc b/changelog.d/14752.misc new file mode 100644 index 000000000000..1f9675c53bca --- /dev/null +++ b/changelog.d/14752.misc @@ -0,0 +1 @@ +Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. 
\ No newline at end of file diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index 281157846ace..63acf86a4619 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -94,10 +94,8 @@ allow_device_name_lookup_over_federation: true experimental_features: # Enable history backfilling support msc2716_enabled: true - {% if not workers_in_use %} # client-side support for partial state in /send_join responses faster_joins: true - {% endif %} # Enable support for polls msc3381_polls_enabled: true # Enable deleting device-specific notification settings stored in account data diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index a183653d5210..e72d96fd165c 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -190,7 +190,7 @@ fi extra_test_args=() -test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930" +test_tags="synapse_blacklist,msc3787,msc3874,msc3890,msc3391,msc3930,faster_joins" # All environment variables starting with PASS_ will be shared. # (The prefix is stripped off before reaching the container.) @@ -223,12 +223,9 @@ else export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite fi - # We only test faster room joins on monoliths, because they are purposefully - # being developed without worker support to start with. - # - # The tests for importing historical messages (MSC2716) also only pass with monoliths, - # currently. - test_tags="$test_tags,faster_joins,msc2716" + # The tests for importing historical messages (MSC2716) + # only pass with monoliths, currently. + test_tags="$test_tags,msc2716" fi diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 8108b1e98f7f..946f3a380744 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -282,13 +282,6 @@ def start(config_options: List[str]) -> None: "synapse.app.user_dir", ) - if config.experimental.faster_joins_enabled: - raise ConfigError( - "You have enabled the experimental `faster_joins` config option, but it is " - "not compatible with worker deployments yet. Please disable `faster_joins` " - "or run Synapse as a single process deployment instead." 
- ) - synapse.events.USE_FROZEN_DICTS = config.server.use_frozen_dicts synapse.util.caches.TRACK_MEMORY_USAGE = config.caches.track_memory_usage diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 0640ea79a03d..58180ae2faba 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -974,6 +974,7 @@ def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): self.federation = hs.get_federation_client() self.clock = hs.get_clock() self.device_handler = device_handler + self._notifier = hs.get_notifier() self._remote_edu_linearizer = Linearizer(name="remote_device_list") @@ -1054,6 +1055,7 @@ async def incoming_device_list_update( user_id, device_id, ) + self._notifier.notify_replication() room_ids = await self.store.get_rooms_for_user(user_id) if not room_ids: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2123ace8a6cc..7620245e2642 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1870,14 +1870,15 @@ async def _sync_partial_state_room( logger.info("Clearing partial-state flag for %s", room_id) success = await self.store.clear_partial_state_room(room_id) + # Poke the notifier so that other workers see the write to + # the un-partial-stated rooms stream. + self._notifier.notify_replication() + if success: logger.info("State resync complete for %s", room_id) self._storage_controllers.state.notify_room_un_partial_stated( room_id ) - # Poke the notifier so that other workers see the write to - # the un-partial-stated rooms stream. - self._notifier.notify_replication() # TODO(faster_joins) update room stats and user directory? # https://github.com/matrix-org/synapse/issues/12814 diff --git a/synapse/replication/tcp/streams/partial_state.py b/synapse/replication/tcp/streams/partial_state.py index b5a2ae74b685..a8ce5ffd7289 100644 --- a/synapse/replication/tcp/streams/partial_state.py +++ b/synapse/replication/tcp/streams/partial_state.py @@ -16,7 +16,6 @@ import attr from synapse.replication.tcp.streams import Stream -from synapse.replication.tcp.streams._base import current_token_without_instance if TYPE_CHECKING: from synapse.server import HomeServer @@ -42,8 +41,7 @@ def __init__(self, hs: "HomeServer"): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - # TODO(faster_joins, multiple writers): we need to account for instance names - current_token_without_instance(store.get_un_partial_stated_rooms_token), + store.get_un_partial_stated_rooms_token, store.get_un_partial_stated_rooms_from_stream, ) @@ -70,7 +68,6 @@ def __init__(self, hs: "HomeServer"): store = hs.get_datastores().main super().__init__( hs.get_instance_name(), - # TODO(faster_joins, multiple writers): we need to account for instance names - current_token_without_instance(store.get_un_partial_stated_events_token), + store.get_un_partial_stated_events_token, store.get_un_partial_stated_events_from_stream, ) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index d8a8bcafb6ce..24127d0364ab 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -322,11 +322,12 @@ def get_chain_id_txn(txn: Cursor) -> int: "stream_id", ) - def get_un_partial_stated_events_token(self) -> int: - # TODO(faster_joins, multiple writers): This is inappropriate if there are multiple - # writers because workers that don't write often will hold all - # readers up. 
- return self._un_partial_stated_events_stream_id_gen.get_current_token() + def get_un_partial_stated_events_token(self, instance_name: str) -> int: + return ( + self._un_partial_stated_events_stream_id_gen.get_current_token_for_writer( + instance_name + ) + ) async def get_un_partial_stated_events_from_stream( self, instance_name: str, last_id: int, current_id: int, limit: int @@ -416,6 +417,8 @@ def process_replication_position( self._stream_id_gen.advance(instance_name, token) elif stream_name == BackfillStream.NAME: self._backfill_id_gen.advance(instance_name, -token) + elif stream_name == UnPartialStatedEventStream.NAME: + self._un_partial_stated_events_stream_id_gen.advance(instance_name, token) super().process_replication_position(stream_name, instance_name, token) async def have_censored_event(self, event_id: str) -> bool: diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 7264a33cd4ac..6a65b2a89bad 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -43,6 +43,7 @@ from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.config.homeserver import HomeServerConfig from synapse.events import EventBase +from synapse.replication.tcp.streams.partial_state import UnPartialStatedRoomStream from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( DatabasePool, @@ -144,6 +145,13 @@ def __init__( "stream_id", ) + def process_replication_position( + self, stream_name: str, instance_name: str, token: int + ) -> None: + if stream_name == UnPartialStatedRoomStream.NAME: + self._un_partial_stated_rooms_stream_id_gen.advance(instance_name, token) + return super().process_replication_position(stream_name, instance_name, token) + async def store_room( self, room_id: str, @@ -1281,13 +1289,10 @@ async def get_join_event_id_and_device_lists_stream_id_for_partial_state( ) return result["join_event_id"], result["device_lists_stream_id"] - def get_un_partial_stated_rooms_token(self) -> int: - # TODO(faster_joins, multiple writers): This is inappropriate if there - # are multiple writers because workers that don't write often will - # hold all readers up. - # (See `MultiWriterIdGenerator.get_persisted_upto_position` for an - # explanation.) 
- return self._un_partial_stated_rooms_stream_id_gen.get_current_token() + def get_un_partial_stated_rooms_token(self, instance_name: str) -> int: + return self._un_partial_stated_rooms_stream_id_gen.get_current_token_for_writer( + instance_name + ) async def get_un_partial_stated_rooms_from_stream( self, instance_name: str, last_id: int, current_id: int, limit: int diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index f32cbb2decd8..ba325d390b58 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -95,6 +95,7 @@ def process_replication_rows( for row in rows: assert isinstance(row, UnPartialStatedEventStreamRow) self._get_state_group_for_event.invalidate((row.event_id,)) + self.is_partial_state_event.invalidate((row.event_id,)) super().process_replication_rows(stream_name, instance_name, token, rows) @@ -485,6 +486,7 @@ def _update_state_for_partial_state_event_txn( "rejection_status_changed": rejection_status_changed, }, ) + txn.call_after(self.hs.get_notifier().on_new_replication_data) class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): From 2ec9c58496e2138cbc4364aba238997c393d5308 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Mon, 23 Jan 2023 10:31:36 +0000 Subject: [PATCH 052/344] Faster joins: Update room stats and the user directory on workers when finishing join (#14874) * Faster joins: Update room stats and user directory on workers when done When finishing a partial state join to a room, we update the current state of the room without persisting additional events. Workers receive notice of the current state update over replication, but neglect to wake the room stats and user directory updaters, which then get incidentally triggered the next time an event is persisted or an unrelated event persister sends out a stream position update. We wake the room stats and user directory updaters at the appropriate time in this commit. Part of #12814 and #12815. Signed-off-by: Sean Quah * fixup comment Signed-off-by: Sean Quah --- changelog.d/14874.bugfix | 1 + synapse/handlers/federation.py | 7 ++++--- synapse/replication/tcp/client.py | 6 ++++++ synapse/storage/controllers/state.py | 2 -- 4 files changed, 11 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14874.bugfix diff --git a/changelog.d/14874.bugfix b/changelog.d/14874.bugfix new file mode 100644 index 000000000000..91ae2ea9bd5f --- /dev/null +++ b/changelog.d/14874.bugfix @@ -0,0 +1 @@ +Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7620245e2642..321712786584 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1880,9 +1880,10 @@ async def _sync_partial_state_room( room_id ) - # TODO(faster_joins) update room stats and user directory? - # https://github.com/matrix-org/synapse/issues/12814 - # https://github.com/matrix-org/synapse/issues/12815 + # Poke the notifier so that other workers see the write to + # the un-partial-stated rooms stream. + self._notifier.notify_replication() + return # we raced against more events arriving with partial state. 
Go round diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 493f61667999..2a9cb499a48d 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -207,6 +207,12 @@ async def on_rdata( # we don't need to optimise this for multiple rows. for row in rows: if row.type != EventsStreamEventRow.TypeId: + # The row's data is an `EventsStreamCurrentStateRow`. + # When we recompute the current state of a room based on forward + # extremities (see `update_current_state`), no new events are + # persisted, so we must poke the replication callbacks ourselves. + # This functionality is used when finishing up a partial state join. + self.notifier.notify_replication() continue assert isinstance(row, EventsStreamRow) assert isinstance(row.data, EventsStreamEventRow) diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 26d79c6e62f2..2045169b9a5d 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -493,8 +493,6 @@ async def get_current_state_deltas( up to date. """ # FIXME(faster_joins): what do we do here? - # https://github.com/matrix-org/synapse/issues/12814 - # https://github.com/matrix-org/synapse/issues/12815 # https://github.com/matrix-org/synapse/issues/13008 return await self.stores.main.get_partial_current_state_deltas( From 82d3efa3124f771579ba07553904f88625c443b0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 23 Jan 2023 06:36:20 -0500 Subject: [PATCH 053/344] Skip processing stats for broken rooms. (#14873) * Skip processing stats for broken rooms. * Newsfragment * Use a custom exception. --- changelog.d/14873.bugfix | 1 + .../storage/databases/main/events_worker.py | 6 +- synapse/storage/databases/main/stats.py | 13 ++- tests/storage/databases/main/test_room.py | 88 ++++++++++++------- 4 files changed, 72 insertions(+), 36 deletions(-) create mode 100644 changelog.d/14873.bugfix diff --git a/changelog.d/14873.bugfix b/changelog.d/14873.bugfix new file mode 100644 index 000000000000..9b058576cdb0 --- /dev/null +++ b/changelog.d/14873.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 24127d0364ab..f42af34a2f28 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -110,6 +110,10 @@ ) +class InvalidEventError(Exception): + """The event retrieved from the database is invalid and cannot be used.""" + + @attr.s(slots=True, auto_attribs=True) class EventCacheEntry: event: EventBase @@ -1310,7 +1314,7 @@ async def _fetch_event_ids_and_get_outstanding_redactions( # invites, so just accept it for all membership events. 
# if d["type"] != EventTypes.Member: - raise Exception( + raise InvalidEventError( "Room %s for event %s is unknown" % (d["room_id"], event_id) ) diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 356d4ca78819..0c1cbd540d6e 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -29,6 +29,7 @@ LoggingDatabaseConnection, LoggingTransaction, ) +from synapse.storage.databases.main.events_worker import InvalidEventError from synapse.storage.databases.main.state_deltas import StateDeltasStore from synapse.types import JsonDict from synapse.util.caches.descriptors import cached @@ -554,7 +555,17 @@ def _fetch_current_state_stats( "get_initial_state_for_room", _fetch_current_state_stats ) - state_event_map = await self.get_events(event_ids, get_prev_content=False) # type: ignore[attr-defined] + try: + state_event_map = await self.get_events(event_ids, get_prev_content=False) # type: ignore[attr-defined] + except InvalidEventError as e: + # If an exception occurs fetching events then the room is broken; + # skip process it to avoid being stuck on a room. + logger.warning( + "Failed to fetch events for room %s, skipping stats calculation: %r.", + room_id, + e, + ) + return room_state: Dict[str, Union[None, bool, str]] = { "join_rules": None, diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py index 7d961fac649c..3108ca344423 100644 --- a/tests/storage/databases/main/test_room.py +++ b/tests/storage/databases/main/test_room.py @@ -40,9 +40,23 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.token = self.login("foo", "pass") def _generate_room(self) -> str: - room_id = self.helper.create_room_as(self.user_id, tok=self.token) + """Create a room and return the room ID.""" + return self.helper.create_room_as(self.user_id, tok=self.token) - return room_id + def run_background_updates(self, update_name: str) -> None: + """Insert and run the background update.""" + self.get_success( + self.store.db_pool.simple_insert( + "background_updates", + {"update_name": update_name, "progress_json": "{}"}, + ) + ) + + # ... and tell the DataStore that it hasn't finished all updates yet + self.store.db_pool.updates._all_done = False + + # Now let's actually drive the updates to completion + self.wait_for_background_updates() def test_background_populate_rooms_creator_column(self) -> None: """Test that the background update to populate the rooms creator column @@ -71,22 +85,7 @@ def test_background_populate_rooms_creator_column(self) -> None: ) self.assertEqual(room_creator_before, None) - # Insert and run the background update. - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": _BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN, - "progress_json": "{}", - }, - ) - ) - - # ... 
and tell the DataStore that it hasn't finished all updates yet - self.store.db_pool.updates._all_done = False - - # Now let's actually drive the updates to completion - self.wait_for_background_updates() + self.run_background_updates(_BackgroundUpdates.POPULATE_ROOMS_CREATOR_COLUMN) # Make sure the background update filled in the room creator room_creator_after = self.get_success( @@ -137,22 +136,7 @@ def test_background_add_room_type_column(self) -> None: ) ) - # Insert and run the background update - self.get_success( - self.store.db_pool.simple_insert( - "background_updates", - { - "update_name": _BackgroundUpdates.ADD_ROOM_TYPE_COLUMN, - "progress_json": "{}", - }, - ) - ) - - # ... and tell the DataStore that it hasn't finished all updates yet - self.store.db_pool.updates._all_done = False - - # Now let's actually drive the updates to completion - self.wait_for_background_updates() + self.run_background_updates(_BackgroundUpdates.ADD_ROOM_TYPE_COLUMN) # Make sure the background update filled in the room type room_type_after = self.get_success( @@ -164,3 +148,39 @@ def test_background_add_room_type_column(self) -> None: ) ) self.assertEqual(room_type_after, RoomTypes.SPACE) + + def test_populate_stats_broken_rooms(self) -> None: + """Ensure that re-populating room stats skips broken rooms.""" + + # Create a good room. + good_room_id = self._generate_room() + + # Create a room and then break it by having no room version. + room_id = self._generate_room() + self.get_success( + self.store.db_pool.simple_update( + table="rooms", + keyvalues={"room_id": room_id}, + updatevalues={"room_version": None}, + desc="test", + ) + ) + + # Nuke any current stats in the database. + self.get_success( + self.store.db_pool.simple_delete( + table="room_stats_state", keyvalues={"1": 1}, desc="test" + ) + ) + + self.run_background_updates("populate_stats_process_rooms") + + # Only the good room appears in the stats tables. + results = self.get_success( + self.store.db_pool.simple_select_onecol( + table="room_stats_state", + keyvalues={}, + retcol="room_id", + ) + ) + self.assertEqual(results, [good_room_id]) From 6005befa2369c8edff7df06408a58d7c675c8488 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jan 2023 09:13:26 -0500 Subject: [PATCH 054/344] Bump types-requests from 2.28.11.7 to 2.28.11.8 (#14899) --- changelog.d/14899.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14899.misc diff --git a/changelog.d/14899.misc b/changelog.d/14899.misc new file mode 100644 index 000000000000..f1ca951ec049 --- /dev/null +++ b/changelog.d/14899.misc @@ -0,0 +1 @@ +Bump types-requests from 2.28.11.7 to 2.28.11.8. 
diff --git a/poetry.lock b/poetry.lock index 178e3787f7d4..750d2011567c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2633,14 +2633,14 @@ files = [ [[package]] name = "types-requests" -version = "2.28.11.7" +version = "2.28.11.8" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.28.11.7.tar.gz", hash = "sha256:0ae38633734990d019b80f5463dfa164ebd3581998ac8435f526da6fe4d598c3"}, - {file = "types_requests-2.28.11.7-py3-none-any.whl", hash = "sha256:b6a2fca8109f4fdba33052f11ed86102bddb2338519e1827387137fefc66a98b"}, + {file = "types-requests-2.28.11.8.tar.gz", hash = "sha256:e67424525f84adfbeab7268a159d3c633862dafae15c5b19547ce1b55954f0a3"}, + {file = "types_requests-2.28.11.8-py3-none-any.whl", hash = "sha256:61960554baca0008ae7e2db2bd3b322ca9a144d3e80ce270f5fb640817e40994"}, ] [package.dependencies] From 641d3e3081404e7f97e1a7b0314c48de999608b6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jan 2023 09:21:36 -0500 Subject: [PATCH 055/344] Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4 (#14900) --- changelog.d/14900.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14900.misc diff --git a/changelog.d/14900.misc b/changelog.d/14900.misc new file mode 100644 index 000000000000..69d6edb90752 --- /dev/null +++ b/changelog.d/14900.misc @@ -0,0 +1 @@ +Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. diff --git a/poetry.lock b/poetry.lock index 750d2011567c..cb826c9f7951 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2594,14 +2594,14 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.2" +version = "2.9.21.4" description = "Typing stubs for psycopg2" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-psycopg2-2.9.21.2.tar.gz", hash = "sha256:bff045579642ce00b4a3c8f2e401b7f96dfaa34939f10be64b0dd3b53feca57d"}, - {file = "types_psycopg2-2.9.21.2-py3-none-any.whl", hash = "sha256:084558d6bc4b2cfa249b06be0fdd9a14a69d307bae5bb5809a2f14cfbaa7a23f"}, + {file = "types-psycopg2-2.9.21.4.tar.gz", hash = "sha256:d43dda166a70d073ddac40718e06539836b5844c99b58ef8d4489a8df2edf5c0"}, + {file = "types_psycopg2-2.9.21.4-py3-none-any.whl", hash = "sha256:6a05dca0856996aa37d7abe436751803bf47ec006cabbefea092e057f23bc95d"}, ] [[package]] From 18ace676d8a1118250af661ecee50d35883608ab Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jan 2023 09:22:38 -0500 Subject: [PATCH 056/344] Bump types-commonmark from 0.9.2 to 0.9.2.1 (#14901) --- changelog.d/14901.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14901.misc diff --git a/changelog.d/14901.misc b/changelog.d/14901.misc new file mode 100644 index 000000000000..21ccec0063b8 --- /dev/null +++ b/changelog.d/14901.misc @@ -0,0 +1 @@ +Bump types-commonmark from 0.9.2 to 0.9.2.1. 
diff --git a/poetry.lock b/poetry.lock index cb826c9f7951..2603b5382f8a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2494,14 +2494,14 @@ files = [ [[package]] name = "types-commonmark" -version = "0.9.2" +version = "0.9.2.1" description = "Typing stubs for commonmark" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-commonmark-0.9.2.tar.gz", hash = "sha256:b894b67750c52fd5abc9a40a9ceb9da4652a391d75c1b480bba9cef90f19fc86"}, - {file = "types_commonmark-0.9.2-py3-none-any.whl", hash = "sha256:56f20199a1f9a2924443211a0ef97f8b15a8a956a7f4e9186be6950bf38d6d02"}, + {file = "types-commonmark-0.9.2.1.tar.gz", hash = "sha256:db8277e6aeb83429265eccece98a24954a9a502dde7bc7cf840a8741abd96b86"}, + {file = "types_commonmark-0.9.2.1-py3-none-any.whl", hash = "sha256:9d5f500cb7eced801bde728137b0a10667bd853d328db641d03141f189e3aab4"}, ] [[package]] From 19f325387b75f1551776193bdfd6d0d2585d28c7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jan 2023 09:26:15 -0500 Subject: [PATCH 057/344] Bump types-opentracing from 2.4.10 to 2.4.10.1 (#14896) --- changelog.d/14896.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14896.misc diff --git a/changelog.d/14896.misc b/changelog.d/14896.misc new file mode 100644 index 000000000000..4f8a6c3f1756 --- /dev/null +++ b/changelog.d/14896.misc @@ -0,0 +1 @@ +Bump types-opentracing from 2.4.10 to 2.4.10.1. diff --git a/poetry.lock b/poetry.lock index 2603b5382f8a..e2ab3e15d455 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2570,14 +2570,14 @@ files = [ [[package]] name = "types-opentracing" -version = "2.4.10" +version = "2.4.10.1" description = "Typing stubs for opentracing" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-opentracing-2.4.10.tar.gz", hash = "sha256:6101414f3b6d3b9c10f1c510a261e8439b6c8d67c723d5c2872084697b4580a7"}, - {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"}, + {file = "types-opentracing-2.4.10.1.tar.gz", hash = "sha256:49e7e52b8b6e221865a9201fc8c2df0bcda8e7098d4ebb35903dbfa4b4d29195"}, + {file = "types_opentracing-2.4.10.1-py3-none-any.whl", hash = "sha256:eb63394acd793e7d9e327956242349fee14580a87c025408dc268d4dd883cc24"}, ] [[package]] From 5e75771ecec2834db70a13f527dc17098b531507 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 23 Jan 2023 09:32:07 -0500 Subject: [PATCH 058/344] Bump ruff from 0.0.224 to 0.0.230 (#14897) --- changelog.d/14897.misc | 1 + poetry.lock | 36 ++++++++++++++++++------------------ pyproject.toml | 2 +- 3 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 changelog.d/14897.misc diff --git a/changelog.d/14897.misc b/changelog.d/14897.misc new file mode 100644 index 000000000000..d192fa1c2f85 --- /dev/null +++ b/changelog.d/14897.misc @@ -0,0 +1 @@ +Bump ruff from 0.0.224 to 0.0.230. diff --git a/poetry.lock b/poetry.lock index e2ab3e15d455..17a6645b5588 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1906,28 +1906,28 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] [[package]] name = "ruff" -version = "0.0.224" +version = "0.0.230" description = "An extremely fast Python linter, written in Rust." 
category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.224-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:015277c45716733e99a19267fd244870bff2b619d4814065fe5cded5ab139b92"}, - {file = "ruff-0.0.224-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ca8211b316fa2df70d90e38819862d58e4aec87643c2c343ba5102a7ff5d3221"}, - {file = "ruff-0.0.224-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:637a502a37da3aac9832a0dd21255376b0320cf66e3d420d11000e09deb3e682"}, - {file = "ruff-0.0.224-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a85fe53b8193c3e9f7ca9bef7dfd3bcd079a86542a14b66f352ce0316de5c457"}, - {file = "ruff-0.0.224-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:367c74a9ff9da165df7dc1e5e1fae5c4cda05cb94202bc9a6e5836243bc625c1"}, - {file = "ruff-0.0.224-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:2d230fc497abdeb3b54d2808ba59802962b50e0611b558337d4c07e6d490ed6c"}, - {file = "ruff-0.0.224-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3df2bcb525fec6c2d551f7ab9843e42e8e4fa33586479953445ef64cbe6d6352"}, - {file = "ruff-0.0.224-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:30e494a23c2f23a07adbd4a9df526057a8cdb4c88536bbc513b5a73385c3d5a7"}, - {file = "ruff-0.0.224-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eac5836f89f1388b7bb718a5c77cdd13e356737573d29581a18d7f575e42124c"}, - {file = "ruff-0.0.224-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:d791073cfd40c8e697d4c79faa67e2ad54dc960854bfa0c0cba61ef4bb02d3b1"}, - {file = "ruff-0.0.224-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ef72a081dfe24bfb8aa0568e0f1ee0174fffbc6ebb0ae2b8b4cd3f9457cc867b"}, - {file = "ruff-0.0.224-py3-none-musllinux_1_2_i686.whl", hash = "sha256:666ecfcf0019f5b0e72e0eba7a7330760b680ba0fb6413b139a594b117e612db"}, - {file = "ruff-0.0.224-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:815d5d448e2bf5107340d6f47cbddac74186cb365c56bdcc2315fbcf59ebc466"}, - {file = "ruff-0.0.224-py3-none-win32.whl", hash = "sha256:17d98c1f03e98c15d3f2c49e0ffdedc57b221217c4e3d7b6f732893101083240"}, - {file = "ruff-0.0.224-py3-none-win_amd64.whl", hash = "sha256:3db4fe992cea69405061e09974c3955b750611b1e76161471c27cd2e8ccffa05"}, - {file = "ruff-0.0.224.tar.gz", hash = "sha256:3b07c2e8da29605a8577b1aef90f8ca0c34a66683b77b06007f1970bc0689003"}, + {file = "ruff-0.0.230-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:fcc31d02cebda0a85e2e13a44642aea7f84362cb4f589e2f6b864e3928e4a7db"}, + {file = "ruff-0.0.230-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:45a7f2c7155d520b8ca255a01235763d5c25fd5e7af055e50a78c6d91ece0ced"}, + {file = "ruff-0.0.230-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4eca8b185ab56cac67acc23287c3c8c62a0c0ffadc0787a3bef3a6e77eaed82f"}, + {file = "ruff-0.0.230-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec2bcdb5040efd8082a3a98369eec4bdc5fd05f53cc6714cb2b725d557d4abe8"}, + {file = "ruff-0.0.230-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:26571aee2b93b60e47e44478f72a9787b387f752e85b85f176739bd91b27cfd1"}, + {file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4b69c9883c3e264f8bb2d52bdabb88b8d9672750ea05f33e0ff52532824bd5c5"}, + {file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", 
hash = "sha256:2b3dc88b83f200378a9b9c91036989f0285a10759514c42235ce02e5824ac8d0"}, + {file = "ruff-0.0.230-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:767716f008dd3a40ec2318396f648fda437c6968087a4526cde5879e382cf477"}, + {file = "ruff-0.0.230-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac27a0f9b96d9923cef7d911790a21a19b51aec0f08375ccc47ad735b1054d78"}, + {file = "ruff-0.0.230-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:729dfc7b7ad4f7d8761dc60c58f15372d6f5c2dd9b6c5952524f2bc3aec7de6a"}, + {file = "ruff-0.0.230-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ad086cf2e5fef274687121f673f0f9b60c8981ec07c2bb0448c459cbaef81bcb"}, + {file = "ruff-0.0.230-py3-none-musllinux_1_2_i686.whl", hash = "sha256:4feaed0978c24687133cd11c7380de20aa841f893e24430c735cc6c3faba4837"}, + {file = "ruff-0.0.230-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1d1046d0d43a0f24b2e9e61d76bb201b486ad02e9787d3432af43bd7d16f2c2e"}, + {file = "ruff-0.0.230-py3-none-win32.whl", hash = "sha256:4d627911c9ba57bcd2f2776f1c09a10d334db163cb5be8c892e7ec7b59ccf58c"}, + {file = "ruff-0.0.230-py3-none-win_amd64.whl", hash = "sha256:27fd4891a1d0642f5b2038ebf86f8169bc3d466964bdfaa0ce2a65149bc7cced"}, + {file = "ruff-0.0.230.tar.gz", hash = "sha256:a049f93af1057ac450e8c09559d44e371eda1c151b1b863c0013a1066fefddb0"}, ] [[package]] @@ -2964,4 +2964,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "38867861f77c6faca817487efd02fbb7271b424d56744ad9ad248cd1dd297566" +content-hash = "2673ef0530a42dae1df998bacfcaf88a563529b39461003a980743a97f02996f" diff --git a/pyproject.toml b/pyproject.toml index d54dde4a2ffe..400eec6ac200 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -309,7 +309,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.224" +ruff = "0.0.230" # Typechecking mypy = "*" From 80d44060c99e87c84da72fdfcaa9a508d38a26b4 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 23 Jan 2023 15:44:39 +0000 Subject: [PATCH 059/344] Faster joins: omit partial rooms from eager syncs until the resync completes (#14870) * Allow `AbstractSet` in `StrCollection` Or else frozensets are excluded. This will be useful in an upcoming commit where I plan to change a function that accepts `List[str]` to accept `StrCollection` instead. * `rooms_to_exclude` -> `rooms_to_exclude_globally` I am about to make use of this exclusion mechanism to exclude rooms for a specific user and a specific sync. This rename helps to clarify the distinction between the global config and the rooms to exclude for a specific sync. * Better function names for internal sync methods * Track a list of excluded rooms on SyncResultBuilder I plan to feed a list of partially stated rooms for this sync to ignore * Exclude partial state rooms during eager sync using the mechanism established in the previous commit * Track un-partial-state stream in sync tokens So that we can work out which rooms have become fully-stated during a given sync period. * Fix mutation of `@cached` return value This was fouling up a complement test added alongside this PR. Excluding a room would mean the set of forgotten rooms in the cache would be extended. This means that room could be erroneously considered forgotten in the future. Introduced in #12310, Synapse 1.57.0. I don't think this had any user-visible side effects (until now). 
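A minimal sketch of the `@cached` mutation pitfall mentioned above (a generic illustration using functools rather than Synapse's cache, with a made-up room ID):

    from functools import lru_cache

    @lru_cache(maxsize=None)
    def forgotten_rooms(user_id: str) -> set:
        # Pretend this set was loaded from the database once and then cached.
        return {"!abc:example.com"}

    def rooms_to_exclude(user_id: str, also_exclude: set) -> set:
        cached = forgotten_rooms(user_id)
        # Mutating `cached` in place (e.g. cached.update(also_exclude)) would
        # poison the cache: later callers would see this request's exclusions
        # as permanently forgotten rooms.
        return set(cached) | also_exclude

Copying the cached value before combining it with per-request data avoids extending the cached set, which is the bug this bullet describes.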
* SyncResultBuilder: track rooms to force as newly joined Similar plan as before. We've omitted rooms from certain sync responses; now we establish the mechanism to reintroduce them into future syncs. * Read new field, to present rooms as newly joined * Force un-partial-stated rooms to be newly-joined for eager incremental syncs only, provided they're still fully stated * Notify user stream listeners to wake up long polling syncs * Changelog * Typo fix Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Unnecessary list cast Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Rephrase comment Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Another comment Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Fixup merge(?) * Poke notifier when receiving un-partial-stated msg over replication * Fixup merge whoops Thanks MV :) Co-authored-by: Mathieu Velen Co-authored-by: Mathieu Velten Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/14870.feature | 1 + synapse/handlers/federation.py | 15 ++--- synapse/handlers/sync.py | 65 +++++++++++++++++--- synapse/notifier.py | 26 ++++++++ synapse/replication/tcp/client.py | 1 + synapse/storage/databases/main/relations.py | 1 + synapse/storage/databases/main/room.py | 47 ++++++++++++-- synapse/storage/databases/main/roommember.py | 19 ++++-- synapse/streams/events.py | 6 ++ synapse/types/__init__.py | 15 +++-- tests/rest/admin/test_room.py | 4 +- tests/rest/client/test_rooms.py | 10 +-- tests/rest/client/test_sync.py | 4 +- 13 files changed, 170 insertions(+), 44 deletions(-) create mode 100644 changelog.d/14870.feature diff --git a/changelog.d/14870.feature b/changelog.d/14870.feature new file mode 100644 index 000000000000..44f701d1c996 --- /dev/null +++ b/changelog.d/14870.feature @@ -0,0 +1 @@ +Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. \ No newline at end of file diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 321712786584..233f8c113d19 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1868,22 +1868,17 @@ async def _sync_partial_state_room( async with self._is_partial_state_room_linearizer.queue(room_id): logger.info("Clearing partial-state flag for %s", room_id) - success = await self.store.clear_partial_state_room(room_id) + new_stream_id = await self.store.clear_partial_state_room(room_id) - # Poke the notifier so that other workers see the write to - # the un-partial-stated rooms stream. - self._notifier.notify_replication() - - if success: + if new_stream_id is not None: logger.info("State resync complete for %s", room_id) self._storage_controllers.state.notify_room_un_partial_stated( room_id ) - # Poke the notifier so that other workers see the write to - # the un-partial-stated rooms stream. - self._notifier.notify_replication() - + await self._notifier.on_un_partial_stated_room( + room_id, new_stream_id + ) return # we raced against more events arriving with partial state. 
Go round diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 78d488f2b1cb..ee117645673f 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -290,7 +290,7 @@ def __init__(self, hs: "HomeServer"): expiry_ms=LAZY_LOADED_MEMBERS_CACHE_MAX_AGE, ) - self.rooms_to_exclude = hs.config.server.rooms_to_exclude_from_sync + self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync async def wait_for_sync_for_user( self, @@ -1340,7 +1340,10 @@ async def generate_sync_result( membership_change_events = [] if since_token: membership_change_events = await self.store.get_membership_changes_for_user( - user_id, since_token.room_key, now_token.room_key, self.rooms_to_exclude + user_id, + since_token.room_key, + now_token.room_key, + self.rooms_to_exclude_globally, ) mem_last_change_by_room_id: Dict[str, EventBase] = {} @@ -1375,12 +1378,39 @@ async def generate_sync_result( else: mutable_joined_room_ids.discard(room_id) + # Tweak the set of rooms to return to the client for eager (non-lazy) syncs. + mutable_rooms_to_exclude = set(self.rooms_to_exclude_globally) + if not sync_config.filter_collection.lazy_load_members(): + # Non-lazy syncs should never include partially stated rooms. + # Exclude all partially stated rooms from this sync. + for room_id in mutable_joined_room_ids: + if await self.store.is_partial_state_room(room_id): + mutable_rooms_to_exclude.add(room_id) + + # Incremental eager syncs should additionally include rooms that + # - we are joined to + # - are full-stated + # - became fully-stated at some point during the sync period + # (These rooms will have been omitted during a previous eager sync.) + forced_newly_joined_room_ids = set() + if since_token and not sync_config.filter_collection.lazy_load_members(): + un_partial_stated_rooms = ( + await self.store.get_un_partial_stated_rooms_between( + since_token.un_partial_stated_rooms_key, + now_token.un_partial_stated_rooms_key, + mutable_joined_room_ids, + ) + ) + for room_id in un_partial_stated_rooms: + if not await self.store.is_partial_state_room(room_id): + forced_newly_joined_room_ids.add(room_id) + # Now we have our list of joined room IDs, exclude as configured and freeze joined_room_ids = frozenset( ( room_id for room_id in mutable_joined_room_ids - if room_id not in self.rooms_to_exclude + if room_id not in mutable_rooms_to_exclude ) ) @@ -1397,6 +1427,8 @@ async def generate_sync_result( since_token=since_token, now_token=now_token, joined_room_ids=joined_room_ids, + excluded_room_ids=frozenset(mutable_rooms_to_exclude), + forced_newly_joined_room_ids=frozenset(forced_newly_joined_room_ids), membership_change_events=membership_change_events, ) @@ -1834,14 +1866,16 @@ async def _generate_sync_entry_for_rooms( # 3. Work out which rooms need reporting in the sync response. 
ignored_users = await self.store.ignored_users(user_id) if since_token: - room_changes = await self._get_rooms_changed( + room_changes = await self._get_room_changes_for_incremental_sync( sync_result_builder, ignored_users ) tags_by_room = await self.store.get_updated_tags( user_id, since_token.account_data_key ) else: - room_changes = await self._get_all_rooms(sync_result_builder, ignored_users) + room_changes = await self._get_room_changes_for_initial_sync( + sync_result_builder, ignored_users + ) tags_by_room = await self.store.get_tags_for_user(user_id) log_kv({"rooms_changed": len(room_changes.room_entries)}) @@ -1900,7 +1934,7 @@ async def _have_rooms_changed( assert since_token - if membership_change_events: + if membership_change_events or sync_result_builder.forced_newly_joined_room_ids: return True stream_id = since_token.room_key.stream @@ -1909,7 +1943,7 @@ async def _have_rooms_changed( return True return False - async def _get_rooms_changed( + async def _get_room_changes_for_incremental_sync( self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str], @@ -1947,7 +1981,9 @@ async def _get_rooms_changed( for event in membership_change_events: mem_change_events_by_room_id.setdefault(event.room_id, []).append(event) - newly_joined_rooms: List[str] = [] + newly_joined_rooms: List[str] = list( + sync_result_builder.forced_newly_joined_room_ids + ) newly_left_rooms: List[str] = [] room_entries: List[RoomSyncResultBuilder] = [] invited: List[InvitedSyncResult] = [] @@ -2153,7 +2189,7 @@ async def _get_rooms_changed( newly_left_rooms, ) - async def _get_all_rooms( + async def _get_room_changes_for_initial_sync( self, sync_result_builder: "SyncResultBuilder", ignored_users: FrozenSet[str], @@ -2178,7 +2214,7 @@ async def _get_all_rooms( room_list = await self.store.get_rooms_for_local_user_where_membership_is( user_id=user_id, membership_list=Membership.LIST, - excluded_rooms=self.rooms_to_exclude, + excluded_rooms=sync_result_builder.excluded_room_ids, ) room_entries = [] @@ -2549,6 +2585,13 @@ class SyncResultBuilder: since_token: The token supplied by user, or None. now_token: The token to sync up to. joined_room_ids: List of rooms the user is joined to + excluded_room_ids: Set of room ids we should omit from the /sync response. + forced_newly_joined_room_ids: + Rooms that should be presented in the /sync response as if they were + newly joined during the sync period, even if that's not the case. + (This is useful if the room was previously excluded from a /sync response, + and now the client should be made aware of it.) + Only used by incremental syncs. # The following mirror the fields in a sync response presence @@ -2565,6 +2608,8 @@ class SyncResultBuilder: since_token: Optional[StreamToken] now_token: StreamToken joined_room_ids: FrozenSet[str] + excluded_room_ids: FrozenSet[str] + forced_newly_joined_room_ids: FrozenSet[str] membership_change_events: List[EventBase] presence: List[UserPresenceState] = attr.Factory(list) diff --git a/synapse/notifier.py b/synapse/notifier.py index 28f0d4a25afe..2b0e52f23c33 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -314,6 +314,32 @@ async def on_new_room_events( event_entries.append((entry, event.event_id)) await self.notify_new_room_events(event_entries, max_room_stream_token) + async def on_un_partial_stated_room( + self, + room_id: str, + new_token: int, + ) -> None: + """Used by the resync background processes to wake up all listeners + of this room when it is un-partial-stated. 
+ + It will also notify replication listeners of the change in stream. + """ + + # Wake up all related user stream notifiers + user_streams = self.room_to_user_streams.get(room_id, set()) + time_now_ms = self.clock.time_msec() + for user_stream in user_streams: + try: + user_stream.notify( + StreamKeyType.UN_PARTIAL_STATED_ROOMS, new_token, time_now_ms + ) + except Exception: + logger.exception("Failed to notify listener") + + # Poke the replication so that other workers also see the write to + # the un-partial-stated rooms stream. + self.notify_replication() + async def notify_new_room_events( self, event_entries: List[Tuple[_PendingRoomEventEntry, str]], diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 2a9cb499a48d..cc0528bd8e5f 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -260,6 +260,7 @@ async def on_rdata( self._state_storage_controller.notify_room_un_partial_stated( row.room_id ) + await self.notifier.on_un_partial_stated_room(row.room_id, token) elif stream_name == UnPartialStatedEventStream.NAME: for row in rows: assert isinstance(row, UnPartialStatedEventStreamRow) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index aea96e9d2478..84f844b79e7f 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -292,6 +292,7 @@ def _get_recent_references_for_event_txn( to_device_key=0, device_list_key=0, groups_key=0, + un_partial_stated_rooms_key=0, ) return events[:limit], next_token diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 6a65b2a89bad..3aa7b945606e 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -26,6 +26,7 @@ Mapping, Optional, Sequence, + Set, Tuple, Union, cast, @@ -1294,10 +1295,44 @@ def get_un_partial_stated_rooms_token(self, instance_name: str) -> int: instance_name ) + async def get_un_partial_stated_rooms_between( + self, last_id: int, current_id: int, room_ids: Collection[str] + ) -> Set[str]: + """Get all rooms that got un partial stated between `last_id` exclusive and + `current_id` inclusive. + + Returns: + The list of room ids. + """ + + if last_id == current_id: + return set() + + def _get_un_partial_stated_rooms_between_txn( + txn: LoggingTransaction, + ) -> Set[str]: + sql = """ + SELECT DISTINCT room_id FROM un_partial_stated_room_stream + WHERE ? < stream_id AND stream_id <= ? AND + """ + + clause, args = make_in_list_sql_clause( + self.database_engine, "room_id", room_ids + ) + + txn.execute(sql + clause, [last_id, current_id] + args) + + return {r[0] for r in txn} + + return await self.db_pool.runInteraction( + "get_un_partial_stated_rooms_between", + _get_un_partial_stated_rooms_between_txn, + ) + async def get_un_partial_stated_rooms_from_stream( self, instance_name: str, last_id: int, current_id: int, limit: int ) -> Tuple[List[Tuple[int, Tuple[str]]], int, bool]: - """Get updates for caches replication stream. + """Get updates for un partial stated rooms replication stream. Args: instance_name: The writer we want to fetch updates from. Unused @@ -2304,16 +2339,16 @@ async def unblock_room(self, room_id: str) -> None: (room_id,), ) - async def clear_partial_state_room(self, room_id: str) -> bool: + async def clear_partial_state_room(self, room_id: str) -> Optional[int]: """Clears the partial state flag for a room. 
Args: room_id: The room whose partial state flag is to be cleared. Returns: - `True` if the partial state flag has been cleared successfully. + The corresponding stream id for the un-partial-stated rooms stream. - `False` if the partial state flag could not be cleared because the room + `None` if the partial state flag could not be cleared because the room still contains events with partial state. """ try: @@ -2324,7 +2359,7 @@ async def clear_partial_state_room(self, room_id: str) -> bool: room_id, un_partial_state_room_stream_id, ) - return True + return un_partial_state_room_stream_id except self.db_pool.engine.module.IntegrityError as e: # Assume that any `IntegrityError`s are due to partial state events. logger.info( @@ -2332,7 +2367,7 @@ async def clear_partial_state_room(self, room_id: str) -> bool: room_id, e, ) - return False + return None def _clear_partial_state_room_txn( self, diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index f02c1d7ea7aa..8e2ba7b7b470 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -15,6 +15,7 @@ import logging from typing import ( TYPE_CHECKING, + AbstractSet, Collection, Dict, FrozenSet, @@ -47,7 +48,13 @@ ProfileInfo, RoomsForUser, ) -from synapse.types import JsonDict, PersistedEventPosition, StateMap, get_domain_from_id +from synapse.types import ( + JsonDict, + PersistedEventPosition, + StateMap, + StrCollection, + get_domain_from_id, +) from synapse.util.async_helpers import Linearizer from synapse.util.caches import intern_string from synapse.util.caches.descriptors import _CacheContext, cached, cachedList @@ -385,7 +392,7 @@ async def get_rooms_for_local_user_where_membership_is( self, user_id: str, membership_list: Collection[str], - excluded_rooms: Optional[List[str]] = None, + excluded_rooms: StrCollection = (), ) -> List[RoomsForUser]: """Get all the rooms for this *local* user where the membership for this user matches one in the membership list. @@ -412,10 +419,12 @@ async def get_rooms_for_local_user_where_membership_is( ) # Now we filter out forgotten and excluded rooms - rooms_to_exclude: Set[str] = await self.get_forgotten_rooms_for_user(user_id) + rooms_to_exclude = await self.get_forgotten_rooms_for_user(user_id) if excluded_rooms is not None: - rooms_to_exclude.update(set(excluded_rooms)) + # Take a copy to avoid mutating the in-cache set + rooms_to_exclude = set(rooms_to_exclude) + rooms_to_exclude.update(excluded_rooms) return [room for room in rooms if room.room_id not in rooms_to_exclude] @@ -1169,7 +1178,7 @@ def f(txn: LoggingTransaction) -> int: return count == 0 @cached() - async def get_forgotten_rooms_for_user(self, user_id: str) -> Set[str]: + async def get_forgotten_rooms_for_user(self, user_id: str) -> AbstractSet[str]: """Gets all rooms the user has forgotten. 
Args: diff --git a/synapse/streams/events.py b/synapse/streams/events.py index 619eb7f601de..d7084d2358cc 100644 --- a/synapse/streams/events.py +++ b/synapse/streams/events.py @@ -53,11 +53,15 @@ def __init__(self, hs: "HomeServer"): *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner)) ) self.store = hs.get_datastores().main + self._instance_name = hs.get_instance_name() def get_current_token(self) -> StreamToken: push_rules_key = self.store.get_max_push_rules_stream_id() to_device_key = self.store.get_to_device_stream_token() device_list_key = self.store.get_device_stream_token() + un_partial_stated_rooms_key = self.store.get_un_partial_stated_rooms_token( + self._instance_name + ) token = StreamToken( room_key=self.sources.room.get_current_key(), @@ -70,6 +74,7 @@ def get_current_token(self) -> StreamToken: device_list_key=device_list_key, # Groups key is unused. groups_key=0, + un_partial_stated_rooms_key=un_partial_stated_rooms_key, ) return token @@ -107,5 +112,6 @@ async def get_current_token_for_pagination(self, room_id: str) -> StreamToken: to_device_key=0, device_list_key=0, groups_key=0, + un_partial_stated_rooms_key=0, ) return token diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index c59eca24301e..f82d1cfc298b 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -17,6 +17,7 @@ import string from typing import ( TYPE_CHECKING, + AbstractSet, Any, ClassVar, Dict, @@ -79,7 +80,7 @@ # Collection[str] that does not include str itself; str being a Sequence[str] # is very misleading and results in bugs. -StrCollection = Union[Tuple[str, ...], List[str], Set[str]] +StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]] # Note that this seems to require inheriting *directly* from Interface in order @@ -633,6 +634,7 @@ class StreamKeyType: PUSH_RULES: Final = "push_rules_key" TO_DEVICE: Final = "to_device_key" DEVICE_LIST: Final = "device_list_key" + UN_PARTIAL_STATED_ROOMS = "un_partial_stated_rooms_key" @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -640,7 +642,7 @@ class StreamToken: """A collection of keys joined together by underscores in the following order and which represent the position in their respective streams. - ex. `s2633508_17_338_6732159_1082514_541479_274711_265584_1` + ex. `s2633508_17_338_6732159_1082514_541479_274711_265584_1_379` 1. `room_key`: `s2633508` which is a `RoomStreamToken` - `RoomStreamToken`'s can also look like `t426-2633508` or `m56~2.58~3.59` - See the docstring for `RoomStreamToken` for more details. @@ -652,12 +654,13 @@ class StreamToken: 7. `to_device_key`: `274711` 8. `device_list_key`: `265584` 9. `groups_key`: `1` (note that this key is now unused) + 10. `un_partial_stated_rooms_key`: `379` You can see how many of these keys correspond to the various fields in a "/sync" response: ```json { - "next_batch": "s12_4_0_1_1_1_1_4_1", + "next_batch": "s12_4_0_1_1_1_1_4_1_1", "presence": { "events": [] }, @@ -669,7 +672,7 @@ class StreamToken: "!QrZlfIDQLNLdZHqTnt:hs1": { "timeline": { "events": [], - "prev_batch": "s10_4_0_1_1_1_1_4_1", + "prev_batch": "s10_4_0_1_1_1_1_4_1_1", "limited": false }, "state": { @@ -705,6 +708,7 @@ class StreamToken: device_list_key: int # Note that the groups key is no longer used and may have bogus values. 
groups_key: int + un_partial_stated_rooms_key: int _SEPARATOR = "_" START: ClassVar["StreamToken"] @@ -743,6 +747,7 @@ async def to_string(self, store: "DataStore") -> str: # serialized so that there will not be confusion in the future # if additional tokens are added. str(self.groups_key), + str(self.un_partial_stated_rooms_key), ] ) @@ -775,7 +780,7 @@ def copy_and_replace(self, key: str, new_value: Any) -> "StreamToken": return attr.evolve(self, **{key: new_value}) -StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0) +StreamToken.START = StreamToken(RoomStreamToken(None, 0), 0, 0, 0, 0, 0, 0, 0, 0, 0) @attr.s(slots=True, frozen=True, auto_attribs=True) diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index e0f5d54abab0..453a6e979c02 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1831,7 +1831,7 @@ def test_timestamp_to_event(self) -> None: def test_topo_token_is_accepted(self) -> None: """Test Topo Token is accepted.""" - token = "t1-0_0_0_0_0_0_0_0_0" + token = "t1-0_0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/_synapse/admin/v1/rooms/%s/messages?from=%s" % (self.room_id, token), @@ -1845,7 +1845,7 @@ def test_topo_token_is_accepted(self) -> None: def test_stream_token_is_accepted_for_fwd_pagianation(self) -> None: """Test that stream token is accepted for forward pagination.""" - token = "s0_0_0_0_0_0_0_0_0" + token = "s0_0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/_synapse/admin/v1/rooms/%s/messages?from=%s" % (self.room_id, token), diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index b4daace55617..9222cab19801 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -1987,7 +1987,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as(self.user_id) def test_topo_token_is_accepted(self) -> None: - token = "t1-0_0_0_0_0_0_0_0_0" + token = "t1-0_0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) @@ -1998,7 +1998,7 @@ def test_topo_token_is_accepted(self) -> None: self.assertTrue("end" in channel.json_body) def test_stream_token_is_accepted_for_fwd_pagianation(self) -> None: - token = "s0_0_0_0_0_0_0_0_0" + token = "s0_0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=x&from=%s" % (self.room_id, token) ) @@ -2728,7 +2728,7 @@ def test_messages_filter_labels(self) -> None: """Test that we can filter by a label on a /messages request.""" self._send_labelled_messages_in_room() - token = "s0_0_0_0_0_0_0_0_0" + token = "s0_0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=%s&from=%s&filter=%s" @@ -2745,7 +2745,7 @@ def test_messages_filter_not_labels(self) -> None: """Test that we can filter by the absence of a label on a /messages request.""" self._send_labelled_messages_in_room() - token = "s0_0_0_0_0_0_0_0_0" + token = "s0_0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=%s&from=%s&filter=%s" @@ -2768,7 +2768,7 @@ def test_messages_filter_labels_not_labels(self) -> None: """ self._send_labelled_messages_in_room() - token = "s0_0_0_0_0_0_0_0_0" + token = "s0_0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=%s&from=%s&filter=%s" diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index 
0af643ecd97b..c9afa0f3dd05 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -913,7 +913,9 @@ def prepare( # We need to manually append the room ID, because we can't know the ID before # creating the room, and we can't set the config after starting the homeserver. - self.hs.get_sync_handler().rooms_to_exclude.append(self.excluded_room_id) + self.hs.get_sync_handler().rooms_to_exclude_globally.append( + self.excluded_room_id + ) def test_join_leave(self) -> None: """Tests that rooms are correctly excluded from the 'join' and 'leave' sections of From 4607be0b7b2165710dc2e5e68ec4281b593ca8c5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 24 Jan 2023 15:28:20 +0000 Subject: [PATCH 060/344] Request partial joins by default (#14905) * Request partial joins by default This is a little sloppy, but we are trying to gain confidence in faster joins in the upcoming RC. Admins can still opt out by adding the following to their Synapse config: ```yaml experimental: faster_joins: false ``` We may revert this change before the release proper, depending on how testing in the wild goes. * Changelog * Try to fix the backfill test failures * Upgrade notes * Postgres compat? --- changelog.d/14905.feature | 1 + docs/upgrade.md | 13 ++++++++ synapse/config/experimental.py | 2 +- synapse/storage/databases/main/stream.py | 40 ++++++++++++++++++++---- 4 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 changelog.d/14905.feature diff --git a/changelog.d/14905.feature b/changelog.d/14905.feature new file mode 100644 index 000000000000..f13a4af9815a --- /dev/null +++ b/changelog.d/14905.feature @@ -0,0 +1 @@ +Faster joins: request partial joins by default. Admins can opt-out of this for the time being---see the upgrade notes. diff --git a/docs/upgrade.md b/docs/upgrade.md index 0d486a3c8200..6316db563b1a 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -90,6 +90,19 @@ process, for example: # Upgrading to v1.76.0 +## Faster joins are enabled by default + +When joining a room for the first time, Synapse 1.76.0rc1 will request a partial join from the other server by default. Previously, server admins had to opt-in to this using an experimental config flag. + +Server admins can opt out of this feature for the time being by setting + +```yaml +experimental: + faster_joins: false +``` + +in their server config. 
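The upgrade note above pairs with a one-line change to the experimental config later in this patch: the `faster_joins` flag is now read with a default of `True` when absent. A minimal, self-contained sketch of that behaviour change for admins wondering what an omitted setting now means (`read_faster_joins` is a hypothetical helper, not part of Synapse):

```python
def read_faster_joins(experimental_section: dict) -> bool:
    # Synapse 1.76 reads the flag with a default of True, so omitting it
    # from the `experimental` section now means "enabled".
    return experimental_section.get("faster_joins", True)


assert read_faster_joins({}) is True                         # no setting: enabled
assert read_faster_joins({"faster_joins": False}) is False   # explicit opt-out
assert read_faster_joins({"faster_joins": True}) is True     # explicit opt-in
```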
+ ## Changes to the account data replication streams Synapse has changed the format of the account data and devices replication diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 89586db76313..2590c88cdeb1 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -84,7 +84,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # experimental support for faster joins over federation # (MSC2775, MSC3706, MSC3895) # requires a target server that can provide a partial join response (MSC3706) - self.faster_joins_enabled: bool = experimental.get("faster_joins", False) + self.faster_joins_enabled: bool = experimental.get("faster_joins", True) # MSC3720 (Account status endpoint) self.msc3720_enabled: bool = experimental.get("msc3720_enabled", False) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 63d835053019..d28fc65df9bb 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -67,7 +67,7 @@ make_in_list_sql_clause, ) from synapse.storage.databases.main.events_worker import EventsWorkerStore -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import PersistedEventPosition, RoomStreamToken from synapse.util.caches.descriptors import cached @@ -944,12 +944,40 @@ async def get_current_topological_token(self, room_id: str, stream_key: int) -> room_id stream_key """ - sql = ( - "SELECT coalesce(MIN(topological_ordering), 0) FROM events" - " WHERE room_id = ? AND stream_ordering >= ?" - ) + if isinstance(self.database_engine, PostgresEngine): + min_function = "LEAST" + elif isinstance(self.database_engine, Sqlite3Engine): + min_function = "MIN" + else: + raise RuntimeError(f"Unknown database engine {self.database_engine}") + + # This query used to be + # SELECT COALESCE(MIN(topological_ordering), 0) FROM events + # WHERE room_id = ? and events.stream_ordering >= {stream_key} + # which returns 0 if the stream_key is newer than any event in + # the room. That's not wrong, but it seems to interact oddly with backfill, + # requiring a second call to /messages to actually backfill from a remote + # homeserver. + # + # Instead, rollback the stream ordering to that after the most recent event in + # this room. + sql = f""" + WITH fallback(max_stream_ordering) AS ( + SELECT MAX(stream_ordering) + FROM events + WHERE room_id = ? + ) + SELECT COALESCE(MIN(topological_ordering), 0) FROM events + WHERE + room_id = ? + AND events.stream_ordering >= {min_function}( + ?, + (SELECT max_stream_ordering FROM fallback) + ) + """ + row = await self.db_pool.execute( - "get_current_topological_token", None, sql, room_id, stream_key + "get_current_topological_token", None, sql, room_id, room_id, stream_key ) return row[0][0] if row else 0 From b15f0758e59c8705ff50a414a5683286b5972381 Mon Sep 17 00:00:00 2001 From: ZAID BIN TARIQ <57444558+thezaidbintariq@users.noreply.github.com> Date: Wed, 25 Jan 2023 17:01:27 +0500 Subject: [PATCH 061/344] Document the export user data command. 
(#14883) --- changelog.d/14883.doc | 1 + docs/usage/administration/admin_faq.md | 8 ++++++++ 2 files changed, 9 insertions(+) create mode 100644 changelog.d/14883.doc diff --git a/changelog.d/14883.doc b/changelog.d/14883.doc new file mode 100644 index 000000000000..8e36cb1c3b06 --- /dev/null +++ b/changelog.d/14883.doc @@ -0,0 +1 @@ +Document the export user data command. Contributed by @thezaidbintariq. \ No newline at end of file diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index a6dc6197c90e..18ce6171dbba 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -32,6 +32,14 @@ What users are registered on my server? SELECT NAME from users; ``` +How can I export user data? +--- +Synapse includes a Python command to export data for a specific user. It takes the homeserver +configuration file and the full Matrix ID of the user to export: +```console +python -m synapse.app.admin_cmd -c export-data +``` + Manually resetting passwords --- Users can reset their password through their client. Alternatively, a server admin From a63d4cc9e96c1f5bb9c5bb9fc9119fb137de3b1b Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 25 Jan 2023 13:38:53 +0000 Subject: [PATCH 062/344] Make sqlite database migrations transactional again (#14910) #13873 introduced a regression which causes sqlite database migrations to no longer run inside a transaction. Wrap them in a transaction again, to avoid database corruption when migrations are interrupted. Fixes #14909. Signed-off-by: Sean Quah --- changelog.d/14910.bugfix | 1 + synapse/storage/engines/_base.py | 3 +++ synapse/storage/engines/sqlite.py | 5 +++-- 3 files changed, 7 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14910.bugfix diff --git a/changelog.d/14910.bugfix b/changelog.d/14910.bugfix new file mode 100644 index 000000000000..f1f34cd6badb --- /dev/null +++ b/changelog.d/14910.bugfix @@ -0,0 +1 @@ +Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index 70e594a68f09..bc9ca3a53c40 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -132,6 +132,9 @@ def executescript(cursor: CursorType, script: str) -> None: """Execute a chunk of SQL containing multiple semicolon-delimited statements. This is not provided by DBAPI2, and so needs engine-specific support. + + Some database engines may automatically COMMIT the ongoing transaction both + before and after executing the script. """ ... diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 14260442b6e4..2f7df85ce4fd 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -135,13 +135,14 @@ def executescript(cursor: sqlite3.Cursor, script: str) -> None: > than one statement with it, it will raise a Warning. Use executescript() if > you want to execute multiple SQL statements with one call. - Though the docs for `executescript` warn: + The script is wrapped in transaction control statemnets, since the docs for + `executescript` warn: > If there is a pending transaction, an implicit COMMIT statement is executed > first. No other implicit transaction control is performed; any transaction > control must be added to sql_script. 
""" - cursor.executescript(script) + cursor.executescript(f"BEGIN TRANSACTION;\n{script}\nCOMMIT;") # Following functions taken from: https://github.com/coleifer/peewee From 8e37ece015c8afd97572bdc742981792b02c6700 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 25 Jan 2023 16:11:06 +0000 Subject: [PATCH 063/344] Bump the client-side timeout for /state (#14912) * Bump the client-side timeout for /state to allow faster joins resyncs the chance to complete for large rooms. We have seen this fair poorly (~90s for Matrix HQ's /state) in testing, causing the resync to advance to another HS who hasn't seen our join yet. * Changelog * Milliseconds!!!! --- changelog.d/14912.misc | 1 + synapse/federation/transport/client.py | 4 ++++ 2 files changed, 5 insertions(+) create mode 100644 changelog.d/14912.misc diff --git a/changelog.d/14912.misc b/changelog.d/14912.misc new file mode 100644 index 000000000000..9dbc6b3424af --- /dev/null +++ b/changelog.d/14912.misc @@ -0,0 +1 @@ +Faster joins: allow the resync process more time to fetch `/state` ids. diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 556883f0798b..682666ab3602 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -102,6 +102,10 @@ async def get_room_state( destination, path=path, args={"event_id": event_id}, + # This can take a looooooong time for large rooms. Give this a generous + # timeout of 10 minutes to avoid the partial state resync timing out early + # and trying a bunch of servers who haven't seen our join yet. + timeout=600_000, parser=_StateParser(room_version), ) From 8a7d2de51fad26cbfb0680482e78c12b5ed90279 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 25 Jan 2023 16:21:27 +0000 Subject: [PATCH 064/344] 1.76.0rc1 --- CHANGES.md | 87 +++++++++++++++++++++++++++++++++++++++ changelog.d/14111.feature | 1 - changelog.d/14629.feature | 1 - changelog.d/14667.doc | 1 - changelog.d/14747.feature | 1 - changelog.d/14749.misc | 1 - changelog.d/14752.misc | 1 - changelog.d/14773.doc | 1 - changelog.d/14775.feature | 1 - changelog.d/14787.feature | 1 - changelog.d/14799.bugfix | 1 - changelog.d/14803.doc | 1 - changelog.d/14804.misc | 1 - changelog.d/14807.misc | 1 - changelog.d/14811.feature | 1 - changelog.d/14812.bugfix | 1 - changelog.d/14816.misc | 1 - changelog.d/14818.doc | 1 - changelog.d/14819.misc | 1 - changelog.d/14820.bugfix | 1 - changelog.d/14821.misc | 1 - changelog.d/14822.misc | 1 - changelog.d/14824.doc | 1 - changelog.d/14825.misc | 1 - changelog.d/14826.misc | 1 - changelog.d/14832.misc | 1 - changelog.d/14833.misc | 1 - changelog.d/14839.feature | 1 - changelog.d/14841.misc | 1 - changelog.d/14842.bugfix | 1 - changelog.d/14843.misc | 1 - changelog.d/14844.misc | 1 - changelog.d/14845.doc | 1 - changelog.d/14848.misc | 1 - changelog.d/14855.misc | 1 - changelog.d/14856.misc | 1 - changelog.d/14860.removal | 1 - changelog.d/14861.misc | 1 - changelog.d/14862.misc | 1 - changelog.d/14863.misc | 1 - changelog.d/14864.bugfix | 1 - changelog.d/14868.doc | 1 - changelog.d/14870.feature | 1 - changelog.d/14872.misc | 1 - changelog.d/14873.bugfix | 1 - changelog.d/14874.bugfix | 1 - changelog.d/14875.docker | 1 - changelog.d/14877.misc | 1 - changelog.d/14881.misc | 1 - changelog.d/14882.bugfix | 1 - changelog.d/14883.doc | 1 - changelog.d/14885.misc | 1 - changelog.d/14889.misc | 1 - changelog.d/14896.misc | 1 - changelog.d/14897.misc | 1 - changelog.d/14899.misc | 1 - changelog.d/14900.misc | 1 - 
changelog.d/14901.misc | 1 - changelog.d/14905.feature | 1 - changelog.d/14910.bugfix | 1 - changelog.d/14912.misc | 1 - debian/changelog | 5 ++- pyproject.toml | 2 +- 63 files changed, 91 insertions(+), 63 deletions(-) delete mode 100644 changelog.d/14111.feature delete mode 100644 changelog.d/14629.feature delete mode 100644 changelog.d/14667.doc delete mode 100644 changelog.d/14747.feature delete mode 100644 changelog.d/14749.misc delete mode 100644 changelog.d/14752.misc delete mode 100644 changelog.d/14773.doc delete mode 100644 changelog.d/14775.feature delete mode 100644 changelog.d/14787.feature delete mode 100644 changelog.d/14799.bugfix delete mode 100644 changelog.d/14803.doc delete mode 100644 changelog.d/14804.misc delete mode 100644 changelog.d/14807.misc delete mode 100644 changelog.d/14811.feature delete mode 100644 changelog.d/14812.bugfix delete mode 100644 changelog.d/14816.misc delete mode 100644 changelog.d/14818.doc delete mode 100644 changelog.d/14819.misc delete mode 100644 changelog.d/14820.bugfix delete mode 100644 changelog.d/14821.misc delete mode 100644 changelog.d/14822.misc delete mode 100644 changelog.d/14824.doc delete mode 100644 changelog.d/14825.misc delete mode 100644 changelog.d/14826.misc delete mode 100644 changelog.d/14832.misc delete mode 100644 changelog.d/14833.misc delete mode 100644 changelog.d/14839.feature delete mode 100644 changelog.d/14841.misc delete mode 100644 changelog.d/14842.bugfix delete mode 100644 changelog.d/14843.misc delete mode 100644 changelog.d/14844.misc delete mode 100644 changelog.d/14845.doc delete mode 100644 changelog.d/14848.misc delete mode 100644 changelog.d/14855.misc delete mode 100644 changelog.d/14856.misc delete mode 100644 changelog.d/14860.removal delete mode 100644 changelog.d/14861.misc delete mode 100644 changelog.d/14862.misc delete mode 100644 changelog.d/14863.misc delete mode 100644 changelog.d/14864.bugfix delete mode 100644 changelog.d/14868.doc delete mode 100644 changelog.d/14870.feature delete mode 100644 changelog.d/14872.misc delete mode 100644 changelog.d/14873.bugfix delete mode 100644 changelog.d/14874.bugfix delete mode 100644 changelog.d/14875.docker delete mode 100644 changelog.d/14877.misc delete mode 100644 changelog.d/14881.misc delete mode 100644 changelog.d/14882.bugfix delete mode 100644 changelog.d/14883.doc delete mode 100644 changelog.d/14885.misc delete mode 100644 changelog.d/14889.misc delete mode 100644 changelog.d/14896.misc delete mode 100644 changelog.d/14897.misc delete mode 100644 changelog.d/14899.misc delete mode 100644 changelog.d/14900.misc delete mode 100644 changelog.d/14901.misc delete mode 100644 changelog.d/14905.feature delete mode 100644 changelog.d/14910.bugfix delete mode 100644 changelog.d/14912.misc diff --git a/CHANGES.md b/CHANGES.md index 30f0a0674761..46fd48dd7df9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,90 @@ +Synapse 1.76.0rc1 (2023-01-25) +============================== + +Features +-------- + +- Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111)) +- Adds a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629)) +- Add a dedicated listener configuration for `health` endpoint. 
([\#14747](https://github.com/matrix-org/synapse/issues/14747)) +- Implement support for MSC3890: Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775)) +- Implement experimental support for MSC3930: Push rules for (MSC3381) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787)) +- Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811)) +- Faster joins: always serve a partial join response to servers that request it with the stable query param. ([\#14839](https://github.com/matrix-org/synapse/issues/14839)) +- Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870)) +- Faster joins: request partial joins by default. Admins can opt-out of this for the time being---see the upgrade notes. ([\#14905](https://github.com/matrix-org/synapse/issues/14905)) + + +Bugfixes +-------- + +- Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. ([\#14799](https://github.com/matrix-org/synapse/issues/14799)) +- Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconencted early. ([\#14812](https://github.com/matrix-org/synapse/issues/14812), [\#14842](https://github.com/matrix-org/synapse/issues/14842)) +- Fix rare races when using workers. ([\#14820](https://github.com/matrix-org/synapse/issues/14820)) +- Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. ([\#14864](https://github.com/matrix-org/synapse/issues/14864)) +- Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. ([\#14873](https://github.com/matrix-org/synapse/issues/14873)) +- Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. ([\#14874](https://github.com/matrix-org/synapse/issues/14874)) +- Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. ([\#14882](https://github.com/matrix-org/synapse/issues/14882)) +- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14910](https://github.com/matrix-org/synapse/issues/14910)) + + +Updates to the Docker image +--------------------------- + +- Bump default Python version in the Dockerfile from 3.9 to 3.11. ([\#14875](https://github.com/matrix-org/synapse/issues/14875)) + + +Improved Documentation +---------------------- + +- Include `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. ([\#14667](https://github.com/matrix-org/synapse/issues/14667)) +- Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. ([\#14773](https://github.com/matrix-org/synapse/issues/14773)) +- Add missing documentation for `tag` to `listeners` section. 
([\#14803](https://github.com/matrix-org/synapse/issues/14803)) +- Updated documentation in configuration manual for `user_directory.search_all_users`. ([\#14818](https://github.com/matrix-org/synapse/issues/14818)) +- Add `worker_manhole` to configuration manual. ([\#14824](https://github.com/matrix-org/synapse/issues/14824)) +- Fix the example config missing the `id` field in [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). ([\#14845](https://github.com/matrix-org/synapse/issues/14845)) +- Minor corrections to the logging configuration documentation. ([\#14868](https://github.com/matrix-org/synapse/issues/14868)) +- Document the export user data command. Contributed by @thezaidbintariq. ([\#14883](https://github.com/matrix-org/synapse/issues/14883)) + + +Deprecations and Removals +------------------------- + +- Poetry 1.3.2 or higher is now required when `poetry install`ing from source. ([\#14860](https://github.com/matrix-org/synapse/issues/14860)) + + +Internal Changes +---------------- + +- Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state. ([\#14749](https://github.com/matrix-org/synapse/issues/14749)) +- Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. ([\#14752](https://github.com/matrix-org/synapse/issues/14752)) +- Add some clarifying comments and refactor a portion of the `Keyring` class for readability. ([\#14804](https://github.com/matrix-org/synapse/issues/14804)) +- Add local poetry config files (`poetry.toml`) to `.gitignore`. ([\#14807](https://github.com/matrix-org/synapse/issues/14807)) +- Add missing type hints. ([\#14816](https://github.com/matrix-org/synapse/issues/14816), [\#14885](https://github.com/matrix-org/synapse/issues/14885), [\#14889](https://github.com/matrix-org/synapse/issues/14889)) +- Refactor push tests. ([\#14819](https://github.com/matrix-org/synapse/issues/14819)) +- Re-enable some linting that was disabled when we switched to ruff. ([\#14821](https://github.com/matrix-org/synapse/issues/14821)) +- Add `cargo fmt` and `cargo clippy` to the lint script. ([\#14822](https://github.com/matrix-org/synapse/issues/14822)) +- Drop unused table `presence`. ([\#14825](https://github.com/matrix-org/synapse/issues/14825)) +- Merge the two account data and the two device list replication streams. ([\#14826](https://github.com/matrix-org/synapse/issues/14826), [\#14833](https://github.com/matrix-org/synapse/issues/14833)) +- Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841)) +- Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843)) +- Add check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844)) +- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848)) +- Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855)) +- Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. 
([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872)) +- Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861)) +- Bump ruff from 0.0.215 to 0.0.224. ([\#14862](https://github.com/matrix-org/synapse/issues/14862)) +- Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863)) +- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877)) +- Reduce max time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881)) +- Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896)) +- Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897)) +- Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899)) +- Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900)) +- Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901)) +- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912)) + + Synapse 1.75.0 (2023-01-17) =========================== diff --git a/changelog.d/14111.feature b/changelog.d/14111.feature deleted file mode 100644 index 0a794701a769..000000000000 --- a/changelog.d/14111.feature +++ /dev/null @@ -1 +0,0 @@ -Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. \ No newline at end of file diff --git a/changelog.d/14629.feature b/changelog.d/14629.feature deleted file mode 100644 index 78f5fc24035f..000000000000 --- a/changelog.d/14629.feature +++ /dev/null @@ -1 +0,0 @@ -Adds a `set_displayname()` method to the module API for setting a user's display name. diff --git a/changelog.d/14667.doc b/changelog.d/14667.doc deleted file mode 100644 index 86d6288121da..000000000000 --- a/changelog.d/14667.doc +++ /dev/null @@ -1 +0,0 @@ -Include `x_forwarded` entry in the HTTP listener example configs and remove the remaining `worker_main_http_uri` entries. diff --git a/changelog.d/14747.feature b/changelog.d/14747.feature deleted file mode 100644 index 0b8066159cba..000000000000 --- a/changelog.d/14747.feature +++ /dev/null @@ -1 +0,0 @@ -Add a dedicated listener configuration for `health` endpoint. \ No newline at end of file diff --git a/changelog.d/14749.misc b/changelog.d/14749.misc deleted file mode 100644 index ff813252250d..000000000000 --- a/changelog.d/14749.misc +++ /dev/null @@ -1 +0,0 @@ -Faster remote room joins (worker mode): do not populate external hosts-in-room cache when sending events as this requires blocking for full state. \ No newline at end of file diff --git a/changelog.d/14752.misc b/changelog.d/14752.misc deleted file mode 100644 index 1f9675c53bca..000000000000 --- a/changelog.d/14752.misc +++ /dev/null @@ -1 +0,0 @@ -Enable Complement tests for Faster Remote Room Joins against worker-mode Synapse. 
\ No newline at end of file diff --git a/changelog.d/14773.doc b/changelog.d/14773.doc deleted file mode 100644 index 0992444be07d..000000000000 --- a/changelog.d/14773.doc +++ /dev/null @@ -1 +0,0 @@ -Remove duplicate commands from the Code Style documentation page; point to the Contributing Guide instead. \ No newline at end of file diff --git a/changelog.d/14775.feature b/changelog.d/14775.feature deleted file mode 100644 index 7b7ee42cacba..000000000000 --- a/changelog.d/14775.feature +++ /dev/null @@ -1 +0,0 @@ -Implement support for MSC3890: Remotely silence local notifications. \ No newline at end of file diff --git a/changelog.d/14787.feature b/changelog.d/14787.feature deleted file mode 100644 index 6a340350470c..000000000000 --- a/changelog.d/14787.feature +++ /dev/null @@ -1 +0,0 @@ -Implement experimental support for MSC3930: Push rules for (MSC3381) Polls. \ No newline at end of file diff --git a/changelog.d/14799.bugfix b/changelog.d/14799.bugfix deleted file mode 100644 index dc867bd93a7c..000000000000 --- a/changelog.d/14799.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add index to improve performance of the `/timestamp_to_event` endpoint used for jumping to a specific date in the timeline of a room. \ No newline at end of file diff --git a/changelog.d/14803.doc b/changelog.d/14803.doc deleted file mode 100644 index 30d8ec8dbc01..000000000000 --- a/changelog.d/14803.doc +++ /dev/null @@ -1 +0,0 @@ -Add missing documentation for `tag` to `listeners` section. \ No newline at end of file diff --git a/changelog.d/14804.misc b/changelog.d/14804.misc deleted file mode 100644 index 24302332bd64..000000000000 --- a/changelog.d/14804.misc +++ /dev/null @@ -1 +0,0 @@ -Add some clarifying comments and refactor a portion of the `Keyring` class for readability. \ No newline at end of file diff --git a/changelog.d/14807.misc b/changelog.d/14807.misc deleted file mode 100644 index eef9cd3a44de..000000000000 --- a/changelog.d/14807.misc +++ /dev/null @@ -1 +0,0 @@ -Add local poetry config files (`poetry.toml`) to `.gitignore`. \ No newline at end of file diff --git a/changelog.d/14811.feature b/changelog.d/14811.feature deleted file mode 100644 index 87542835c3a2..000000000000 --- a/changelog.d/14811.feature +++ /dev/null @@ -1 +0,0 @@ -Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. diff --git a/changelog.d/14812.bugfix b/changelog.d/14812.bugfix deleted file mode 100644 index 94e0d70cbcc4..000000000000 --- a/changelog.d/14812.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconencted early. diff --git a/changelog.d/14816.misc b/changelog.d/14816.misc deleted file mode 100644 index d44571b73149..000000000000 --- a/changelog.d/14816.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. diff --git a/changelog.d/14818.doc b/changelog.d/14818.doc deleted file mode 100644 index 7a47cc8ab35e..000000000000 --- a/changelog.d/14818.doc +++ /dev/null @@ -1 +0,0 @@ -Updated documentation in configuration manual for `user_directory.search_all_users`. \ No newline at end of file diff --git a/changelog.d/14819.misc b/changelog.d/14819.misc deleted file mode 100644 index 9c568dbc9cf7..000000000000 --- a/changelog.d/14819.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor push tests. 
diff --git a/changelog.d/14820.bugfix b/changelog.d/14820.bugfix deleted file mode 100644 index 36e94f2b9b96..000000000000 --- a/changelog.d/14820.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix rare races when using workers. diff --git a/changelog.d/14821.misc b/changelog.d/14821.misc deleted file mode 100644 index 99e4e5e8a12e..000000000000 --- a/changelog.d/14821.misc +++ /dev/null @@ -1 +0,0 @@ -Re-enable some linting that was disabled when we switched to ruff. diff --git a/changelog.d/14822.misc b/changelog.d/14822.misc deleted file mode 100644 index 5e02cc84885d..000000000000 --- a/changelog.d/14822.misc +++ /dev/null @@ -1 +0,0 @@ -Add `cargo fmt` and `cargo clippy` to the lint script. \ No newline at end of file diff --git a/changelog.d/14824.doc b/changelog.d/14824.doc deleted file mode 100644 index 172d37baf251..000000000000 --- a/changelog.d/14824.doc +++ /dev/null @@ -1 +0,0 @@ -Add `worker_manhole` to configuration manual. \ No newline at end of file diff --git a/changelog.d/14825.misc b/changelog.d/14825.misc deleted file mode 100644 index 64312ac09e2a..000000000000 --- a/changelog.d/14825.misc +++ /dev/null @@ -1 +0,0 @@ -Drop unused table `presence`. \ No newline at end of file diff --git a/changelog.d/14826.misc b/changelog.d/14826.misc deleted file mode 100644 index e80673a72167..000000000000 --- a/changelog.d/14826.misc +++ /dev/null @@ -1 +0,0 @@ -Merge the two account data and the two device list replication streams. diff --git a/changelog.d/14832.misc b/changelog.d/14832.misc deleted file mode 100644 index 61e7401e43dc..000000000000 --- a/changelog.d/14832.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). diff --git a/changelog.d/14833.misc b/changelog.d/14833.misc deleted file mode 100644 index e80673a72167..000000000000 --- a/changelog.d/14833.misc +++ /dev/null @@ -1 +0,0 @@ -Merge the two account data and the two device list replication streams. diff --git a/changelog.d/14839.feature b/changelog.d/14839.feature deleted file mode 100644 index a4206be007dd..000000000000 --- a/changelog.d/14839.feature +++ /dev/null @@ -1 +0,0 @@ -Faster joins: always serve a partial join response to servers that request it with the stable query param. diff --git a/changelog.d/14841.misc b/changelog.d/14841.misc deleted file mode 100644 index 61e7401e43dc..000000000000 --- a/changelog.d/14841.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). diff --git a/changelog.d/14842.bugfix b/changelog.d/14842.bugfix deleted file mode 100644 index 94e0d70cbcc4..000000000000 --- a/changelog.d/14842.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse would exhaust the stack when processing many federation requests where the remote homeserver has disconencted early. diff --git a/changelog.d/14843.misc b/changelog.d/14843.misc deleted file mode 100644 index bec3c216bc93..000000000000 --- a/changelog.d/14843.misc +++ /dev/null @@ -1 +0,0 @@ -Add a parameter to control whether the federation client performs a partial state join. diff --git a/changelog.d/14844.misc b/changelog.d/14844.misc deleted file mode 100644 index 30ce8663045f..000000000000 --- a/changelog.d/14844.misc +++ /dev/null @@ -1 +0,0 @@ -Add check to avoid starting duplicate partial state syncs. 
diff --git a/changelog.d/14845.doc b/changelog.d/14845.doc deleted file mode 100644 index dd969aa05cea..000000000000 --- a/changelog.d/14845.doc +++ /dev/null @@ -1 +0,0 @@ -Fix the example config missing the `id` field in [application service documentation](https://matrix-org.github.io/synapse/latest/application_services.html). diff --git a/changelog.d/14848.misc b/changelog.d/14848.misc deleted file mode 100644 index 32aa6c9bc815..000000000000 --- a/changelog.d/14848.misc +++ /dev/null @@ -1 +0,0 @@ -Bump regex from 1.7.0 to 1.7.1. diff --git a/changelog.d/14855.misc b/changelog.d/14855.misc deleted file mode 100644 index f0e292f2878f..000000000000 --- a/changelog.d/14855.misc +++ /dev/null @@ -1 +0,0 @@ -Add an early return when handling no-op presence updates. diff --git a/changelog.d/14856.misc b/changelog.d/14856.misc deleted file mode 100644 index 3731d6cbf184..000000000000 --- a/changelog.d/14856.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. diff --git a/changelog.d/14860.removal b/changelog.d/14860.removal deleted file mode 100644 index 3458d38a4825..000000000000 --- a/changelog.d/14860.removal +++ /dev/null @@ -1 +0,0 @@ -Poetry 1.3.2 or higher is now required when `poetry install`ing from source. diff --git a/changelog.d/14861.misc b/changelog.d/14861.misc deleted file mode 100644 index 44813061140d..000000000000 --- a/changelog.d/14861.misc +++ /dev/null @@ -1 +0,0 @@ -Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. diff --git a/changelog.d/14862.misc b/changelog.d/14862.misc deleted file mode 100644 index b18147669b30..000000000000 --- a/changelog.d/14862.misc +++ /dev/null @@ -1 +0,0 @@ -Bump ruff from 0.0.215 to 0.0.224. diff --git a/changelog.d/14863.misc b/changelog.d/14863.misc deleted file mode 100644 index 9dc092256c9d..000000000000 --- a/changelog.d/14863.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.4.0.0 to 9.4.0.3. diff --git a/changelog.d/14864.bugfix b/changelog.d/14864.bugfix deleted file mode 100644 index 12c0c74ab3cd..000000000000 --- a/changelog.d/14864.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.64.0 when using room version 10 with frozen events enabled. diff --git a/changelog.d/14868.doc b/changelog.d/14868.doc deleted file mode 100644 index b5ddff78a1ca..000000000000 --- a/changelog.d/14868.doc +++ /dev/null @@ -1 +0,0 @@ -Minor corrections to the logging configuration documentation. diff --git a/changelog.d/14870.feature b/changelog.d/14870.feature deleted file mode 100644 index 44f701d1c996..000000000000 --- a/changelog.d/14870.feature +++ /dev/null @@ -1 +0,0 @@ -Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. \ No newline at end of file diff --git a/changelog.d/14872.misc b/changelog.d/14872.misc deleted file mode 100644 index 3731d6cbf184..000000000000 --- a/changelog.d/14872.misc +++ /dev/null @@ -1 +0,0 @@ -Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. diff --git a/changelog.d/14873.bugfix b/changelog.d/14873.bugfix deleted file mode 100644 index 9b058576cdb0..000000000000 --- a/changelog.d/14873.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the `populate_room_stats` background job could fail on broken rooms. 
diff --git a/changelog.d/14874.bugfix b/changelog.d/14874.bugfix deleted file mode 100644 index 91ae2ea9bd5f..000000000000 --- a/changelog.d/14874.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster joins: Fix a bug in worker deployments where the room stats and user directory would not get updated when finishing a fast join until another event is sent or received. diff --git a/changelog.d/14875.docker b/changelog.d/14875.docker deleted file mode 100644 index 584fc104708d..000000000000 --- a/changelog.d/14875.docker +++ /dev/null @@ -1 +0,0 @@ -Bump default Python version in the Dockerfile from 3.9 to 3.11. diff --git a/changelog.d/14877.misc b/changelog.d/14877.misc deleted file mode 100644 index 4e9c3fa33fdc..000000000000 --- a/changelog.d/14877.misc +++ /dev/null @@ -1 +0,0 @@ -Always notify replication when a stream advances automatically. diff --git a/changelog.d/14881.misc b/changelog.d/14881.misc deleted file mode 100644 index be89d092b6a0..000000000000 --- a/changelog.d/14881.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce max time we wait for stream positions. diff --git a/changelog.d/14882.bugfix b/changelog.d/14882.bugfix deleted file mode 100644 index 1fda344361b5..000000000000 --- a/changelog.d/14882.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster joins: Fix incompatibility with joins into restricted rooms where no local users have the ability to invite. diff --git a/changelog.d/14883.doc b/changelog.d/14883.doc deleted file mode 100644 index 8e36cb1c3b06..000000000000 --- a/changelog.d/14883.doc +++ /dev/null @@ -1 +0,0 @@ -Document the export user data command. Contributed by @thezaidbintariq. \ No newline at end of file diff --git a/changelog.d/14885.misc b/changelog.d/14885.misc deleted file mode 100644 index 9f5384e60e78..000000000000 --- a/changelog.d/14885.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. \ No newline at end of file diff --git a/changelog.d/14889.misc b/changelog.d/14889.misc deleted file mode 100644 index 9f5384e60e78..000000000000 --- a/changelog.d/14889.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. \ No newline at end of file diff --git a/changelog.d/14896.misc b/changelog.d/14896.misc deleted file mode 100644 index 4f8a6c3f1756..000000000000 --- a/changelog.d/14896.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-opentracing from 2.4.10 to 2.4.10.1. diff --git a/changelog.d/14897.misc b/changelog.d/14897.misc deleted file mode 100644 index d192fa1c2f85..000000000000 --- a/changelog.d/14897.misc +++ /dev/null @@ -1 +0,0 @@ -Bump ruff from 0.0.224 to 0.0.230. diff --git a/changelog.d/14899.misc b/changelog.d/14899.misc deleted file mode 100644 index f1ca951ec049..000000000000 --- a/changelog.d/14899.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.28.11.7 to 2.28.11.8. diff --git a/changelog.d/14900.misc b/changelog.d/14900.misc deleted file mode 100644 index 69d6edb90752..000000000000 --- a/changelog.d/14900.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. diff --git a/changelog.d/14901.misc b/changelog.d/14901.misc deleted file mode 100644 index 21ccec0063b8..000000000000 --- a/changelog.d/14901.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-commonmark from 0.9.2 to 0.9.2.1. diff --git a/changelog.d/14905.feature b/changelog.d/14905.feature deleted file mode 100644 index f13a4af9815a..000000000000 --- a/changelog.d/14905.feature +++ /dev/null @@ -1 +0,0 @@ -Faster joins: request partial joins by default. Admins can opt-out of this for the time being---see the upgrade notes. 
diff --git a/changelog.d/14910.bugfix b/changelog.d/14910.bugfix deleted file mode 100644 index f1f34cd6badb..000000000000 --- a/changelog.d/14910.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. diff --git a/changelog.d/14912.misc b/changelog.d/14912.misc deleted file mode 100644 index 9dbc6b3424af..000000000000 --- a/changelog.d/14912.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: allow the resync process more time to fetch `/state` ids. diff --git a/debian/changelog b/debian/changelog index ee7439f38f27..05333662955c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,9 @@ -matrix-synapse-py3 (1.75.1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.76.0~rc1) stable; urgency=medium * Use Poetry 1.3.2 to manage the bundled virtualenv included with this package. + * New Synapse release 1.76.0rc1. - -- Synapse Packaging team Tue, 17 Jan 2023 15:08:00 +0000 + -- Synapse Packaging team Wed, 25 Jan 2023 16:21:16 +0000 matrix-synapse-py3 (1.75.0) stable; urgency=medium diff --git a/pyproject.toml b/pyproject.toml index 400eec6ac200..0d671d25835b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.75.0" +version = "1.76.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 48e3ad8a06dbdf8fd11409c1bd4a692eb3364b45 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 25 Jan 2023 16:41:32 +0000 Subject: [PATCH 065/344] Group dependabot lines --- CHANGES.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 46fd48dd7df9..991b984841ec 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -69,20 +69,20 @@ Internal Changes - Faster joins: use stable identifiers from [MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706). ([\#14832](https://github.com/matrix-org/synapse/issues/14832), [\#14841](https://github.com/matrix-org/synapse/issues/14841)) - Add a parameter to control whether the federation client performs a partial state join. ([\#14843](https://github.com/matrix-org/synapse/issues/14843)) - Add check to avoid starting duplicate partial state syncs. ([\#14844](https://github.com/matrix-org/synapse/issues/14844)) -- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848)) - Add an early return when handling no-op presence updates. ([\#14855](https://github.com/matrix-org/synapse/issues/14855)) - Fix `wait_for_stream_position` to correctly wait for the right instance to advance its token. ([\#14856](https://github.com/matrix-org/synapse/issues/14856), [\#14872](https://github.com/matrix-org/synapse/issues/14872)) +- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877)) +- Reduce max time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881)) +- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912)) +- Bump regex from 1.7.0 to 1.7.1. ([\#14848](https://github.com/matrix-org/synapse/issues/14848)) - Bump peaceiris/actions-gh-pages from 3.9.1 to 3.9.2. ([\#14861](https://github.com/matrix-org/synapse/issues/14861)) - Bump ruff from 0.0.215 to 0.0.224. 
([\#14862](https://github.com/matrix-org/synapse/issues/14862)) - Bump types-pillow from 9.4.0.0 to 9.4.0.3. ([\#14863](https://github.com/matrix-org/synapse/issues/14863)) -- Always notify replication when a stream advances automatically. ([\#14877](https://github.com/matrix-org/synapse/issues/14877)) -- Reduce max time we wait for stream positions. ([\#14881](https://github.com/matrix-org/synapse/issues/14881)) - Bump types-opentracing from 2.4.10 to 2.4.10.1. ([\#14896](https://github.com/matrix-org/synapse/issues/14896)) - Bump ruff from 0.0.224 to 0.0.230. ([\#14897](https://github.com/matrix-org/synapse/issues/14897)) - Bump types-requests from 2.28.11.7 to 2.28.11.8. ([\#14899](https://github.com/matrix-org/synapse/issues/14899)) - Bump types-psycopg2 from 2.9.21.2 to 2.9.21.4. ([\#14900](https://github.com/matrix-org/synapse/issues/14900)) - Bump types-commonmark from 0.9.2 to 0.9.2.1. ([\#14901](https://github.com/matrix-org/synapse/issues/14901)) -- Faster joins: allow the resync process more time to fetch `/state` ids. ([\#14912](https://github.com/matrix-org/synapse/issues/14912)) Synapse 1.75.0 (2023-01-17) From 5f25fa358d6fd564550c8c4c76da27b4ad9f58f8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 25 Jan 2023 16:41:42 +0000 Subject: [PATCH 066/344] Touch-up the features section --- CHANGES.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 991b984841ec..55961f2e02d3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,10 +5,10 @@ Features -------- - Update the default room version to [v10](https://spec.matrix.org/v1.5/rooms/v10/) ([MSC 3904](https://github.com/matrix-org/matrix-spec-proposals/pull/3904)). Contributed by @FSG-Cat. ([\#14111](https://github.com/matrix-org/synapse/issues/14111)) -- Adds a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629)) +- Add a `set_displayname()` method to the module API for setting a user's display name. ([\#14629](https://github.com/matrix-org/synapse/issues/14629)) - Add a dedicated listener configuration for `health` endpoint. ([\#14747](https://github.com/matrix-org/synapse/issues/14747)) -- Implement support for MSC3890: Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775)) -- Implement experimental support for MSC3930: Push rules for (MSC3381) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787)) +- Implement support for [MSC3890](https://github.com/matrix-org/matrix-spec-proposals/pull/3890): Remotely silence local notifications. ([\#14775](https://github.com/matrix-org/synapse/issues/14775)) +- Implement experimental support for [MSC3930](https://github.com/matrix-org/matrix-spec-proposals/pull/3930): Push rules for ([MSC3381](https://github.com/matrix-org/matrix-spec-proposals/pull/3381)) Polls. ([\#14787](https://github.com/matrix-org/synapse/issues/14787)) - Per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925), bundle the whole of the replacement with any edited events, and optionally inhibit server-side replacement. ([\#14811](https://github.com/matrix-org/synapse/issues/14811)) - Faster joins: always serve a partial join response to servers that request it with the stable query param. 
([\#14839](https://github.com/matrix-org/synapse/issues/14839)) - Faster joins: allow non-lazy-loading ("eager") syncs to complete after a partial join by omitting partial state rooms until they become fully stated. ([\#14870](https://github.com/matrix-org/synapse/issues/14870)) From 58fa1ed21e45c8a653ffade739acc1116aa18d99 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 25 Jan 2023 16:41:50 +0000 Subject: [PATCH 067/344] Refer to upgrade notes --- CHANGES.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index 55961f2e02d3..8d382c91dd66 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,12 @@ Synapse 1.76.0rc1 (2023-01-25) ============================== +This release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/upgrade.md#faster-joins-are-enabled-by-default for more details. + +The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams). + +Those who are `poetry install`ing from source using our lockfile should ensure their poetry version is 1.3.2 or higher; [see upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132). + Features -------- From f51035bc874ec72329080e1290dcb59a3dace8db Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 25 Jan 2023 16:44:04 +0000 Subject: [PATCH 068/344] Fix link syntax in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 8d382c91dd66..cc5ee1619015 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.76.0rc1 (2023-01-25) ============================== -This release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/upgrade.md#faster-joins-are-enabled-by-default for more details. +This release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/upgrade.md#faster-joins-are-enabled-by-default) for more details. The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams). From 836c592f15b1511c4b367f2778c0e3d1a89dba09 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 25 Jan 2023 20:38:20 +0100 Subject: [PATCH 069/344] Fix type hints in knocking tests. 
(#14887) --- changelog.d/14887.misc | 1 + mypy.ini | 1 - tests/federation/transport/test_knocking.py | 6 +++--- tests/rest/client/test_sync.py | 4 +--- 4 files changed, 5 insertions(+), 7 deletions(-) create mode 100644 changelog.d/14887.misc diff --git a/changelog.d/14887.misc b/changelog.d/14887.misc new file mode 100644 index 000000000000..9f5384e60e78 --- /dev/null +++ b/changelog.d/14887.misc @@ -0,0 +1 @@ +Add missing type hints. \ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 63366dad99cc..248402532e87 100644 --- a/mypy.ini +++ b/mypy.ini @@ -39,7 +39,6 @@ exclude = (?x) |tests/events/test_utils.py |tests/federation/test_federation_catch_up.py |tests/federation/test_federation_sender.py - |tests/federation/transport/test_knocking.py |tests/handlers/test_typing.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index d21c11b716cd..ff589c0b6ca0 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -23,10 +23,10 @@ from synapse.types import RoomAlias from tests.test_utils import event_injection -from tests.unittest import FederatingHomeserverTestCase, TestCase +from tests.unittest import FederatingHomeserverTestCase, HomeserverTestCase -class KnockingStrippedStateEventHelperMixin(TestCase): +class KnockingStrippedStateEventHelperMixin(HomeserverTestCase): def send_example_state_events_to_room( self, hs: "HomeServer", @@ -49,7 +49,7 @@ def send_example_state_events_to_room( # To set a canonical alias, we'll need to point an alias at the room first. canonical_alias = "#fancy_alias:test" self.get_success( - self.store.create_room_alias_association( + self.hs.get_datastores().main.create_room_alias_association( RoomAlias.from_string(canonical_alias), room_id, ["test"] ) ) diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index c9afa0f3dd05..b9047194dd63 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -294,9 +294,7 @@ def test_sync_backwards_typing(self) -> None: self.make_request("GET", sync_url % (access_token, next_batch)) -class SyncKnockTestCase( - unittest.HomeserverTestCase, KnockingStrippedStateEventHelperMixin -): +class SyncKnockTestCase(KnockingStrippedStateEventHelperMixin): servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, From 8bc5d1406cb24730fd87facb8c5b29a76360782e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 25 Jan 2023 14:49:37 -0500 Subject: [PATCH 070/344] Document how to handle Dependabot pull requests. (#14916) --- changelog.d/14916.misc | 1 + docs/development/dependencies.md | 14 ++++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 changelog.d/14916.misc diff --git a/changelog.d/14916.misc b/changelog.d/14916.misc new file mode 100644 index 000000000000..59914d4b8afb --- /dev/null +++ b/changelog.d/14916.misc @@ -0,0 +1 @@ +Document how to handle Dependabot pull requests. diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index b734cc5826df..c4449c51f780 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -258,6 +258,20 @@ because [`build`](https://github.com/pypa/build) is a standardish tool which doesn't require poetry. (It's what we use in CI too). However, you could try `poetry build` too. +## ...handle a Dependabot pull request? 
+ +Synapse uses Dependabot to keep the `poetry.lock` file up-to-date. When it +creates a pull request a GitHub Action will run to automatically create a changelog +file. Ensure that: + +* the lockfile changes look reasonable; +* the upstream changelog file (linked in the description) doesn't include any + breaking changes; +* continuous integration passes (due to permissions, the GitHub Actions run on + the changelog commit will fail, look at the initial commit of the pull request); + +In particular, any updates to the type hints (usually packages which start with `types-`) +should be safe to merge if linting passes. # Troubleshooting From 3c3ba31507cbff27064ea3c6cf1e7add9583556a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 25 Jan 2023 15:14:03 -0500 Subject: [PATCH 071/344] Add missing type hints for tests.events. (#14904) --- changelog.d/14904.misc | 1 + mypy.ini | 5 +- synapse/events/utils.py | 3 +- tests/events/test_presence_router.py | 58 ++++++++++++++--------- tests/events/test_snapshot.py | 17 +++++-- tests/events/test_utils.py | 71 +++++++++++++++------------- 6 files changed, 91 insertions(+), 64 deletions(-) create mode 100644 changelog.d/14904.misc diff --git a/changelog.d/14904.misc b/changelog.d/14904.misc new file mode 100644 index 000000000000..d44571b73149 --- /dev/null +++ b/changelog.d/14904.misc @@ -0,0 +1 @@ +Add missing type hints. diff --git a/mypy.ini b/mypy.ini index 248402532e87..13890ce12498 100644 --- a/mypy.ini +++ b/mypy.ini @@ -35,8 +35,6 @@ exclude = (?x) |tests/api/test_auth.py |tests/app/test_openid_listener.py |tests/appservice/test_scheduler.py - |tests/events/test_presence_router.py - |tests/events/test_utils.py |tests/federation/test_federation_catch_up.py |tests/federation/test_federation_sender.py |tests/handlers/test_typing.py @@ -86,6 +84,9 @@ disallow_untyped_defs = True [mypy-tests.crypto.*] disallow_untyped_defs = True +[mypy-tests.events.*] +disallow_untyped_defs = True + [mypy-tests.federation.transport.test_client] disallow_untyped_defs = True diff --git a/synapse/events/utils.py b/synapse/events/utils.py index ae57a4df5ee8..52e4b467e876 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -605,10 +605,11 @@ def serialize_events( _PowerLevel = Union[str, int] +PowerLevelsContent = Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]] def copy_and_fixup_power_levels_contents( - old_power_levels: Mapping[str, Union[_PowerLevel, Mapping[str, _PowerLevel]]] + old_power_levels: PowerLevelsContent, ) -> Dict[str, Union[int, Dict[str, int]]]: """Copy the content of a power_levels event, unfreezing frozendicts along the way. 
diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index b703e4472e9e..a9893def7422 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -16,6 +16,8 @@ import attr +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EduTypes from synapse.events.presence_router import PresenceRouter, load_legacy_presence_router from synapse.federation.units import Transaction @@ -23,11 +25,13 @@ from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login, presence, room +from synapse.server import HomeServer from synapse.types import JsonDict, StreamToken, create_requester +from synapse.util import Clock from tests.handlers.test_sync import generate_sync_config from tests.test_utils import simple_async_mock -from tests.unittest import FederatingHomeserverTestCase, TestCase, override_config +from tests.unittest import FederatingHomeserverTestCase, override_config @attr.s @@ -49,9 +53,7 @@ async def get_users_for_states( } return users_to_state - async def get_interested_users( - self, user_id: str - ) -> Union[Set[str], PresenceRouter.ALL_USERS]: + async def get_interested_users(self, user_id: str) -> Union[Set[str], str]: if user_id in self._config.users_who_should_receive_all_presence: return PresenceRouter.ALL_USERS @@ -71,9 +73,14 @@ def parse_config(config_dict: dict) -> PresenceRouterTestConfig: # Initialise a typed config object config = PresenceRouterTestConfig() - config.users_who_should_receive_all_presence = config_dict.get( + users_who_should_receive_all_presence = config_dict.get( "users_who_should_receive_all_presence" ) + assert isinstance(users_who_should_receive_all_presence, list) + + config.users_who_should_receive_all_presence = ( + users_who_should_receive_all_presence + ) return config @@ -96,9 +103,7 @@ async def get_users_for_states( } return users_to_state - async def get_interested_users( - self, user_id: str - ) -> Union[Set[str], PresenceRouter.ALL_USERS]: + async def get_interested_users(self, user_id: str) -> Union[Set[str], str]: if user_id in self._config.users_who_should_receive_all_presence: return PresenceRouter.ALL_USERS @@ -118,9 +123,14 @@ def parse_config(config_dict: dict) -> PresenceRouterTestConfig: # Initialise a typed config object config = PresenceRouterTestConfig() - config.users_who_should_receive_all_presence = config_dict.get( + users_who_should_receive_all_presence = config_dict.get( "users_who_should_receive_all_presence" ) + assert isinstance(users_who_should_receive_all_presence, list) + + config.users_who_should_receive_all_presence = ( + users_who_should_receive_all_presence + ) return config @@ -140,7 +150,7 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase): presence.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
fed_transport_client = Mock(spec=["send_transaction"]) fed_transport_client.send_transaction = simple_async_mock({}) @@ -153,7 +163,9 @@ def make_homeserver(self, reactor, clock): return hs - def prepare(self, reactor, clock, homeserver): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: self.sync_handler = self.hs.get_sync_handler() self.module_api = homeserver.get_module_api() @@ -176,7 +188,7 @@ def default_config(self) -> JsonDict: }, } ) - def test_receiving_all_presence_legacy(self): + def test_receiving_all_presence_legacy(self) -> None: self.receiving_all_presence_test_body() @override_config( @@ -193,10 +205,10 @@ def test_receiving_all_presence_legacy(self): ], } ) - def test_receiving_all_presence(self): + def test_receiving_all_presence(self) -> None: self.receiving_all_presence_test_body() - def receiving_all_presence_test_body(self): + def receiving_all_presence_test_body(self) -> None: """Test that a user that does not share a room with another other can receive presence for them, due to presence routing. """ @@ -302,7 +314,7 @@ def receiving_all_presence_test_body(self): }, } ) - def test_send_local_online_presence_to_with_module_legacy(self): + def test_send_local_online_presence_to_with_module_legacy(self) -> None: self.send_local_online_presence_to_with_module_test_body() @override_config( @@ -321,10 +333,10 @@ def test_send_local_online_presence_to_with_module_legacy(self): ], } ) - def test_send_local_online_presence_to_with_module(self): + def test_send_local_online_presence_to_with_module(self) -> None: self.send_local_online_presence_to_with_module_test_body() - def send_local_online_presence_to_with_module_test_body(self): + def send_local_online_presence_to_with_module_test_body(self) -> None: """Tests that send_local_presence_to_users sends local online presence to a set of specified local and remote users, with a custom PresenceRouter module enabled. 
""" @@ -447,18 +459,18 @@ def send_local_online_presence_to_with_module_test_body(self): continue # EDUs can contain multiple presence updates - for presence_update in edu["content"]["push"]: + for presence_edu in edu["content"]["push"]: # Check for presence updates that contain the user IDs we're after - found_users.add(presence_update["user_id"]) + found_users.add(presence_edu["user_id"]) # Ensure that no offline states are being sent out - self.assertNotEqual(presence_update["presence"], "offline") + self.assertNotEqual(presence_edu["presence"], "offline") self.assertEqual(found_users, expected_users) def send_presence_update( - testcase: TestCase, + testcase: FederatingHomeserverTestCase, user_id: str, access_token: str, presence_state: str, @@ -479,7 +491,7 @@ def send_presence_update( def sync_presence( - testcase: TestCase, + testcase: FederatingHomeserverTestCase, user_id: str, since_token: Optional[StreamToken] = None, ) -> Tuple[List[UserPresenceState], StreamToken]: @@ -500,7 +512,7 @@ def sync_presence( requester = create_requester(user_id) sync_config = generate_sync_config(requester.user.to_string()) sync_result = testcase.get_success( - testcase.sync_handler.wait_for_sync_for_user( + testcase.hs.get_sync_handler().wait_for_sync_for_user( requester, sync_config, since_token ) ) diff --git a/tests/events/test_snapshot.py b/tests/events/test_snapshot.py index 8ddce83b830d..6687c28e8fea 100644 --- a/tests/events/test_snapshot.py +++ b/tests/events/test_snapshot.py @@ -12,9 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. +from twisted.test.proto_helpers import MemoryReactor + +from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.rest import admin from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.test_utils.event_injection import create_event @@ -27,7 +32,7 @@ class TestEventContext(unittest.HomeserverTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main self._storage_controllers = hs.get_storage_controllers() @@ -35,7 +40,7 @@ def prepare(self, reactor, clock, hs): self.user_tok = self.login("u1", "pass") self.room_id = self.helper.create_room_as(tok=self.user_tok) - def test_serialize_deserialize_msg(self): + def test_serialize_deserialize_msg(self) -> None: """Test that an EventContext for a message event is the same after serialize/deserialize. """ @@ -51,7 +56,7 @@ def test_serialize_deserialize_msg(self): self._check_serialize_deserialize(event, context) - def test_serialize_deserialize_state_no_prev(self): + def test_serialize_deserialize_state_no_prev(self) -> None: """Test that an EventContext for a state event (with not previous entry) is the same after serialize/deserialize. """ @@ -67,7 +72,7 @@ def test_serialize_deserialize_state_no_prev(self): self._check_serialize_deserialize(event, context) - def test_serialize_deserialize_state_prev(self): + def test_serialize_deserialize_state_prev(self) -> None: """Test that an EventContext for a state event (which replaces a previous entry) is the same after serialize/deserialize. 
""" @@ -84,7 +89,9 @@ def test_serialize_deserialize_state_prev(self): self._check_serialize_deserialize(event, context) - def _check_serialize_deserialize(self, event, context): + def _check_serialize_deserialize( + self, event: EventBase, context: EventContext + ) -> None: serialized = self.get_success(context.serialize(event, self.store)) d_context = EventContext.deserialize(self._storage_controllers, serialized) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index a79256846fb3..ff7b349d756c 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -13,21 +13,24 @@ # limitations under the License. import unittest as stdlib_unittest +from typing import Any, List, Mapping, Optional from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.events.utils import ( + PowerLevelsContent, SerializeEventConfig, copy_and_fixup_power_levels_contents, maybe_upsert_event_field, prune_event, serialize_event, ) +from synapse.types import JsonDict from synapse.util.frozenutils import freeze -def MockEvent(**kwargs): +def MockEvent(**kwargs: Any) -> EventBase: if "event_id" not in kwargs: kwargs["event_id"] = "fake_event_id" if "type" not in kwargs: @@ -60,7 +63,7 @@ def test_update_not_okay_leaves_original_value(self) -> None: class PruneEventTestCase(stdlib_unittest.TestCase): - def run_test(self, evdict, matchdict, **kwargs): + def run_test(self, evdict: JsonDict, matchdict: JsonDict, **kwargs: Any) -> None: """ Asserts that a new event constructed with `evdict` will look like `matchdict` when it is redacted. @@ -74,7 +77,7 @@ def run_test(self, evdict, matchdict, **kwargs): prune_event(make_event_from_dict(evdict, **kwargs)).get_dict(), matchdict ) - def test_minimal(self): + def test_minimal(self) -> None: self.run_test( {"type": "A", "event_id": "$test:domain"}, { @@ -86,7 +89,7 @@ def test_minimal(self): }, ) - def test_basic_keys(self): + def test_basic_keys(self) -> None: """Ensure that the keys that should be untouched are kept.""" # Note that some of the values below don't really make sense, but the # pruning of events doesn't worry about the values of any fields (with @@ -138,7 +141,7 @@ def test_basic_keys(self): room_version=RoomVersions.MSC2176, ) - def test_unsigned(self): + def test_unsigned(self) -> None: """Ensure that unsigned properties get stripped (except age_ts and replaces_state).""" self.run_test( { @@ -159,7 +162,7 @@ def test_unsigned(self): }, ) - def test_content(self): + def test_content(self) -> None: """The content dictionary should be stripped in most cases.""" self.run_test( {"type": "C", "event_id": "$test:domain", "content": {"things": "here"}}, @@ -194,7 +197,7 @@ def test_content(self): }, ) - def test_create(self): + def test_create(self) -> None: """Create events are partially redacted until MSC2176.""" self.run_test( { @@ -223,7 +226,7 @@ def test_create(self): room_version=RoomVersions.MSC2176, ) - def test_power_levels(self): + def test_power_levels(self) -> None: """Power level events keep a variety of content keys.""" self.run_test( { @@ -273,7 +276,7 @@ def test_power_levels(self): room_version=RoomVersions.MSC2176, ) - def test_alias_event(self): + def test_alias_event(self) -> None: """Alias events have special behavior up through room version 6.""" self.run_test( { @@ -302,7 +305,7 @@ def test_alias_event(self): room_version=RoomVersions.V6, ) - def 
test_redacts(self): + def test_redacts(self) -> None: """Redaction events have no special behaviour until MSC2174/MSC2176.""" self.run_test( @@ -328,7 +331,7 @@ def test_redacts(self): room_version=RoomVersions.MSC2176, ) - def test_join_rules(self): + def test_join_rules(self) -> None: """Join rules events have changed behavior starting with MSC3083.""" self.run_test( { @@ -371,7 +374,7 @@ def test_join_rules(self): room_version=RoomVersions.V8, ) - def test_member(self): + def test_member(self) -> None: """Member events have changed behavior starting with MSC3375.""" self.run_test( { @@ -417,12 +420,12 @@ def test_member(self): class SerializeEventTestCase(stdlib_unittest.TestCase): - def serialize(self, ev, fields): + def serialize(self, ev: EventBase, fields: Optional[List[str]]) -> JsonDict: return serialize_event( ev, 1479807801915, config=SerializeEventConfig(only_event_fields=fields) ) - def test_event_fields_works_with_keys(self): + def test_event_fields_works_with_keys(self) -> None: self.assertEqual( self.serialize( MockEvent(sender="@alice:localhost", room_id="!foo:bar"), ["room_id"] @@ -430,7 +433,7 @@ def test_event_fields_works_with_keys(self): {"room_id": "!foo:bar"}, ) - def test_event_fields_works_with_nested_keys(self): + def test_event_fields_works_with_nested_keys(self) -> None: self.assertEqual( self.serialize( MockEvent( @@ -443,7 +446,7 @@ def test_event_fields_works_with_nested_keys(self): {"content": {"body": "A message"}}, ) - def test_event_fields_works_with_dot_keys(self): + def test_event_fields_works_with_dot_keys(self) -> None: self.assertEqual( self.serialize( MockEvent( @@ -456,7 +459,7 @@ def test_event_fields_works_with_dot_keys(self): {"content": {"key.with.dots": {}}}, ) - def test_event_fields_works_with_nested_dot_keys(self): + def test_event_fields_works_with_nested_dot_keys(self) -> None: self.assertEqual( self.serialize( MockEvent( @@ -472,7 +475,7 @@ def test_event_fields_works_with_nested_dot_keys(self): {"content": {"nested.dot.key": {"leaf.key": 42}}}, ) - def test_event_fields_nops_with_unknown_keys(self): + def test_event_fields_nops_with_unknown_keys(self) -> None: self.assertEqual( self.serialize( MockEvent( @@ -485,7 +488,7 @@ def test_event_fields_nops_with_unknown_keys(self): {"content": {"foo": "bar"}}, ) - def test_event_fields_nops_with_non_dict_keys(self): + def test_event_fields_nops_with_non_dict_keys(self) -> None: self.assertEqual( self.serialize( MockEvent( @@ -498,7 +501,7 @@ def test_event_fields_nops_with_non_dict_keys(self): {}, ) - def test_event_fields_nops_with_array_keys(self): + def test_event_fields_nops_with_array_keys(self) -> None: self.assertEqual( self.serialize( MockEvent( @@ -511,7 +514,7 @@ def test_event_fields_nops_with_array_keys(self): {}, ) - def test_event_fields_all_fields_if_empty(self): + def test_event_fields_all_fields_if_empty(self) -> None: self.assertEqual( self.serialize( MockEvent( @@ -531,16 +534,16 @@ def test_event_fields_all_fields_if_empty(self): }, ) - def test_event_fields_fail_if_fields_not_str(self): + def test_event_fields_fail_if_fields_not_str(self) -> None: with self.assertRaises(TypeError): self.serialize( - MockEvent(room_id="!foo:bar", content={"foo": "bar"}), ["room_id", 4] + MockEvent(room_id="!foo:bar", content={"foo": "bar"}), ["room_id", 4] # type: ignore[list-item] ) class CopyPowerLevelsContentTestCase(stdlib_unittest.TestCase): def setUp(self) -> None: - self.test_content = { + self.test_content: PowerLevelsContent = { "ban": 50, "events": {"m.room.name": 100, 
"m.room.power_levels": 100}, "events_default": 0, @@ -553,10 +556,11 @@ def setUp(self) -> None: "users_default": 0, } - def _test(self, input): + def _test(self, input: PowerLevelsContent) -> None: a = copy_and_fixup_power_levels_contents(input) self.assertEqual(a["ban"], 50) + assert isinstance(a["events"], Mapping) self.assertEqual(a["events"]["m.room.name"], 100) # make sure that changing the copy changes the copy and not the orig @@ -564,18 +568,19 @@ def _test(self, input): a["events"]["m.room.power_levels"] = 20 self.assertEqual(input["ban"], 50) + assert isinstance(input["events"], Mapping) self.assertEqual(input["events"]["m.room.power_levels"], 100) - def test_unfrozen(self): + def test_unfrozen(self) -> None: self._test(self.test_content) - def test_frozen(self): + def test_frozen(self) -> None: input = freeze(self.test_content) self._test(input) - def test_stringy_integers(self): + def test_stringy_integers(self) -> None: """String representations of decimal integers are converted to integers.""" - input = { + input: PowerLevelsContent = { "a": "100", "b": { "foo": 99, @@ -603,9 +608,9 @@ def test_strings_that_dont_represent_decimal_integers(self) -> None: def test_invalid_types_raise_type_error(self) -> None: with self.assertRaises(TypeError): - copy_and_fixup_power_levels_contents({"a": ["hello", "grandma"]}) # type: ignore[arg-type] - copy_and_fixup_power_levels_contents({"a": None}) # type: ignore[arg-type] + copy_and_fixup_power_levels_contents({"a": ["hello", "grandma"]}) # type: ignore[dict-item] + copy_and_fixup_power_levels_contents({"a": None}) # type: ignore[dict-item] def test_invalid_nesting_raises_type_error(self) -> None: with self.assertRaises(TypeError): - copy_and_fixup_power_levels_contents({"a": {"b": {"c": 1}}}) + copy_and_fixup_power_levels_contents({"a": {"b": {"c": 1}}}) # type: ignore[dict-item] From 7e8d455280b58dbda3ff24b19dbffad2d6c6c253 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 25 Jan 2023 16:34:37 -0500 Subject: [PATCH 072/344] Fix a bug in the send_local_online_presence_to module API (#14880) Destination was being used incorrectly (a single destination instead of a list of destinations was being passed). This also updates some of the types in the area to not use Collection[str], which is a footgun. --- changelog.d/14880.bugfix | 1 + synapse/handlers/presence.py | 18 ++++++++++++------ synapse/module_api/__init__.py | 2 +- synapse/notifier.py | 3 ++- synapse/streams/__init__.py | 6 +++--- 5 files changed, 19 insertions(+), 11 deletions(-) create mode 100644 changelog.d/14880.bugfix diff --git a/changelog.d/14880.bugfix b/changelog.d/14880.bugfix new file mode 100644 index 000000000000..e56c567082a5 --- /dev/null +++ b/changelog.d/14880.bugfix @@ -0,0 +1 @@ +Fix a bug when using the `send_local_online_presence_to` module API. 
diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 43e4e7b1b4c2..87af31aa2706 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -64,7 +64,13 @@ from synapse.replication.tcp.streams import PresenceFederationStream, PresenceStream from synapse.storage.databases.main import DataStore from synapse.streams import EventSource -from synapse.types import JsonDict, StreamKeyType, UserID, get_domain_from_id +from synapse.types import ( + JsonDict, + StrCollection, + StreamKeyType, + UserID, + get_domain_from_id, +) from synapse.util.async_helpers import Linearizer from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -320,7 +326,7 @@ async def maybe_send_presence_to_interested_destinations( for destination, host_states in hosts_to_states.items(): self._federation.send_presence_to_destinations(host_states, [destination]) - async def send_full_presence_to_users(self, user_ids: Collection[str]) -> None: + async def send_full_presence_to_users(self, user_ids: StrCollection) -> None: """ Adds to the list of users who should receive a full snapshot of presence upon their next sync. Note that this only works for local users. @@ -1601,7 +1607,7 @@ async def get_new_events( # Having a default limit doesn't match the EventSource API, but some # callers do not provide it. It is unused in this class. limit: int = 0, - room_ids: Optional[Collection[str]] = None, + room_ids: Optional[StrCollection] = None, is_guest: bool = False, explicit_room_id: Optional[str] = None, include_offline: bool = True, @@ -1688,7 +1694,7 @@ async def get_new_events( # The set of users that we're interested in and that have had a presence update. # We'll actually pull the presence updates for these users at the end. - interested_and_updated_users: Collection[str] + interested_and_updated_users: StrCollection if from_key is not None: # First get all users that have had a presence update @@ -2120,7 +2126,7 @@ def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler): # stream_id, destinations, user_ids)`. We don't store the full states # for efficiency, and remote workers will already have the full states # cached. - self._queue: List[Tuple[int, int, Collection[str], Set[str]]] = [] + self._queue: List[Tuple[int, int, StrCollection, Set[str]]] = [] self._next_id = 1 @@ -2142,7 +2148,7 @@ def _clear_queue(self) -> None: self._queue = self._queue[index:] def send_presence_to_destinations( - self, states: Collection[UserPresenceState], destinations: Collection[str] + self, states: Collection[UserPresenceState], destinations: StrCollection ) -> None: """Send the presence states to the given destinations. diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 6153a48257b3..d22dd19d388a 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1158,7 +1158,7 @@ async def send_local_online_presence_to(self, users: Iterable[str]) -> None: # Send to remote destinations. 
destination = UserID.from_string(user).domain presence_handler.get_federation_queue().send_presence_to_destinations( - presence_events, destination + presence_events, [destination] ) def looping_background_call( diff --git a/synapse/notifier.py b/synapse/notifier.py index 2b0e52f23c33..a8832a3f8e80 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -46,6 +46,7 @@ JsonDict, PersistedEventPosition, RoomStreamToken, + StrCollection, StreamKeyType, StreamToken, UserID, @@ -716,7 +717,7 @@ async def check_for_updates( async def _get_room_ids( self, user: UserID, explicit_room_id: Optional[str] - ) -> Tuple[Collection[str], bool]: + ) -> Tuple[StrCollection, bool]: joined_room_ids = await self.store.get_rooms_for_user(user.to_string()) if explicit_room_id: if explicit_room_id in joined_room_ids: diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py index 2dcd43d0a2f5..c6c8a0315c9b 100644 --- a/synapse/streams/__init__.py +++ b/synapse/streams/__init__.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Collection, Generic, List, Optional, Tuple, TypeVar +from typing import Generic, List, Optional, Tuple, TypeVar -from synapse.types import UserID +from synapse.types import StrCollection, UserID # The key, this is either a stream token or int. K = TypeVar("K") @@ -28,7 +28,7 @@ async def get_new_events( user: UserID, from_key: K, limit: int, - room_ids: Collection[str], + room_ids: StrCollection, is_guest: bool, explicit_room_id: Optional[str] = None, ) -> Tuple[List[R], K]: From 871ff05addfabecafe3e5d906f63fcc7af895cd5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 26 Jan 2023 11:15:50 +0100 Subject: [PATCH 073/344] Fix type hints in typing edu unit tests (#14886) --- changelog.d/14886.misc | 1 + mypy.ini | 1 - tests/handlers/test_typing.py | 99 +++++++++++++++++----------- tests/storage/test_user_directory.py | 5 +- tests/unittest.py | 3 +- 5 files changed, 66 insertions(+), 43 deletions(-) create mode 100644 changelog.d/14886.misc diff --git a/changelog.d/14886.misc b/changelog.d/14886.misc new file mode 100644 index 000000000000..9f5384e60e78 --- /dev/null +++ b/changelog.d/14886.misc @@ -0,0 +1 @@ +Add missing type hints. 
\ No newline at end of file diff --git a/mypy.ini b/mypy.ini index 13890ce12498..e57bc6426156 100644 --- a/mypy.ini +++ b/mypy.ini @@ -37,7 +37,6 @@ exclude = (?x) |tests/appservice/test_scheduler.py |tests/federation/test_federation_catch_up.py |tests/federation/test_federation_sender.py - |tests/handlers/test_typing.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py |tests/http/test_proxyagent.py diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index efbb5a8dbbaf..1fe9563c98f3 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -14,21 +14,22 @@ import json -from typing import Dict +from typing import Dict, List, Set from unittest.mock import ANY, Mock, call -from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource from synapse.api.constants import EduTypes from synapse.api.errors import AuthError from synapse.federation.transport.server import TransportLayerServer +from synapse.handlers.typing import TypingWriterHandler from synapse.server import HomeServer from synapse.types import JsonDict, Requester, UserID, create_requester from synapse.util import Clock from tests import unittest +from tests.server import ThreadedMemoryReactorClock from tests.test_utils import make_awaitable from tests.unittest import override_config @@ -62,7 +63,11 @@ def _make_edu_transaction_json(edu_type: str, content: JsonDict) -> bytes: class TypingNotificationsTestCase(unittest.HomeserverTestCase): - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + def make_homeserver( + self, + reactor: ThreadedMemoryReactorClock, + clock: Clock, + ) -> HomeServer: # we mock out the keyring so as to skip the authentication check on the # federation API call. mock_keyring = Mock(spec=["verify_json_for_server"]) @@ -75,8 +80,9 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) + self.mock_hs_notifier = Mock() hs = self.setup_test_homeserver( - notifier=Mock(), + notifier=self.mock_hs_notifier, federation_http_client=mock_federation_client, keyring=mock_keyring, replication_streams={}, @@ -90,32 +96,38 @@ def create_resource_dict(self) -> Dict[str, Resource]: return d def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - mock_notifier = hs.get_notifier() - self.on_new_event = mock_notifier.on_new_event + self.on_new_event = self.mock_hs_notifier.on_new_event - self.handler = hs.get_typing_handler() + # hs.get_typing_handler will return a TypingWriterHandler when calling it + # from the main process, and a FollowerTypingHandler on workers. + # We rely on methods only available on the former, so assert we have the + # correct type here. 
We have to assign self.handler after the assert, + # otherwise mypy will treat it as a FollowerTypingHandler + handler = hs.get_typing_handler() + assert isinstance(handler, TypingWriterHandler) + self.handler = handler self.event_source = hs.get_event_sources().sources.typing self.datastore = hs.get_datastores().main + self.datastore.get_destination_retry_timings = Mock( return_value=make_awaitable(None) ) - self.datastore.get_device_updates_by_remote = Mock( + self.datastore.get_device_updates_by_remote = Mock( # type: ignore[assignment] return_value=make_awaitable((0, [])) ) - self.datastore.get_destination_last_successful_stream_ordering = Mock( + self.datastore.get_destination_last_successful_stream_ordering = Mock( # type: ignore[assignment] return_value=make_awaitable(None) ) - def get_received_txn_response(*args): - return defer.succeed(None) - - self.datastore.get_received_txn_response = get_received_txn_response + self.datastore.get_received_txn_response = Mock( # type: ignore[assignment] + return_value=make_awaitable(None) + ) - self.room_members = [] + self.room_members: List[UserID] = [] async def check_user_in_room(room_id: str, requester: Requester) -> None: if requester.user.to_string() not in [ @@ -124,47 +136,54 @@ async def check_user_in_room(room_id: str, requester: Requester) -> None: raise AuthError(401, "User is not in the room") return None - hs.get_auth().check_user_in_room = check_user_in_room + hs.get_auth().check_user_in_room = Mock( # type: ignore[assignment] + side_effect=check_user_in_room + ) async def check_host_in_room(room_id: str, server_name: str) -> bool: return room_id == ROOM_ID - hs.get_event_auth_handler().is_host_in_room = check_host_in_room + hs.get_event_auth_handler().is_host_in_room = Mock( # type: ignore[assignment] + side_effect=check_host_in_room + ) - async def get_current_hosts_in_room(room_id: str): + async def get_current_hosts_in_room(room_id: str) -> Set[str]: return {member.domain for member in self.room_members} - hs.get_storage_controllers().state.get_current_hosts_in_room = ( - get_current_hosts_in_room + hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( # type: ignore[assignment] + side_effect=get_current_hosts_in_room ) - hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = ( - get_current_hosts_in_room + hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = Mock( # type: ignore[assignment] + side_effect=get_current_hosts_in_room ) - async def get_users_in_room(room_id: str): + async def get_users_in_room(room_id: str) -> Set[str]: return {str(u) for u in self.room_members} - self.datastore.get_users_in_room = get_users_in_room + self.datastore.get_users_in_room = Mock(side_effect=get_users_in_room) - self.datastore.get_user_directory_stream_pos = Mock( + self.datastore.get_user_directory_stream_pos = Mock( # type: ignore[assignment] side_effect=( - # we deliberately return a non-None stream pos to avoid doing an initial_spam + # we deliberately return a non-None stream pos to avoid + # doing an initial_sync lambda: make_awaitable(1) ) ) - self.datastore.get_partial_current_state_deltas = Mock(return_value=(0, None)) + self.datastore.get_partial_current_state_deltas = Mock(return_value=(0, None)) # type: ignore[assignment] - self.datastore.get_to_device_stream_token = lambda: 0 - self.datastore.get_new_device_msgs_for_remote = ( - lambda *args, **kargs: make_awaitable(([], 0)) + self.datastore.get_to_device_stream_token = Mock( # 
type: ignore[assignment] + side_effect=lambda: 0 + ) + self.datastore.get_new_device_msgs_for_remote = Mock( # type: ignore[assignment] + side_effect=lambda *args, **kargs: make_awaitable(([], 0)) ) - self.datastore.delete_device_msgs_for_remote = ( - lambda *args, **kargs: make_awaitable(None) + self.datastore.delete_device_msgs_for_remote = Mock( # type: ignore[assignment] + side_effect=lambda *args, **kargs: make_awaitable(None) ) - self.datastore.set_received_txn_response = ( - lambda *args, **kwargs: make_awaitable(None) + self.datastore.set_received_txn_response = Mock( # type: ignore[assignment] + side_effect=lambda *args, **kwargs: make_awaitable(None) ) def test_started_typing_local(self) -> None: @@ -186,7 +205,7 @@ def test_started_typing_local(self) -> None: self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( - user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False + user=U_APPLE, from_key=0, limit=0, room_ids=[ROOM_ID], is_guest=False ) ) self.assertEqual( @@ -257,7 +276,7 @@ def test_started_typing_remote_recv(self) -> None: self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( - user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False + user=U_APPLE, from_key=0, limit=0, room_ids=[ROOM_ID], is_guest=False ) ) self.assertEqual( @@ -298,7 +317,7 @@ def test_started_typing_remote_recv_not_in_room(self) -> None: self.event_source.get_new_events( user=U_APPLE, from_key=0, - limit=None, + limit=0, room_ids=[OTHER_ROOM_ID], is_guest=False, ) @@ -351,7 +370,7 @@ def test_stopped_typing(self) -> None: self.assertEqual(self.event_source.get_current_key(), 1) events = self.get_success( self.event_source.get_new_events( - user=U_APPLE, from_key=0, limit=None, room_ids=[ROOM_ID], is_guest=False + user=U_APPLE, from_key=0, limit=0, room_ids=[ROOM_ID], is_guest=False ) ) self.assertEqual( @@ -387,7 +406,7 @@ def test_typing_timeout(self) -> None: self.event_source.get_new_events( user=U_APPLE, from_key=0, - limit=None, + limit=0, room_ids=[ROOM_ID], is_guest=False, ) @@ -412,7 +431,7 @@ def test_typing_timeout(self) -> None: self.event_source.get_new_events( user=U_APPLE, from_key=1, - limit=None, + limit=0, room_ids=[ROOM_ID], is_guest=False, ) @@ -447,7 +466,7 @@ def test_typing_timeout(self) -> None: self.event_source.get_new_events( user=U_APPLE, from_key=0, - limit=None, + limit=0, room_ids=[ROOM_ID], is_guest=False, ) diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 3ba896ecf384..f1ca523d23f9 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -28,6 +28,7 @@ from synapse.storage.roommember import ProfileInfo from synapse.util import Clock +from tests.server import ThreadedMemoryReactorClock from tests.test_utils.event_injection import inject_member_event from tests.unittest import HomeserverTestCase, override_config @@ -138,7 +139,9 @@ class UserDirectoryInitialPopulationTestcase(HomeserverTestCase): register.register_servlets, ] - def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + def make_homeserver( + self, reactor: ThreadedMemoryReactorClock, clock: Clock + ) -> HomeServer: self.appservice = ApplicationService( token="i_am_an_app_service", id="1234", diff --git a/tests/unittest.py b/tests/unittest.py index a120c2976ccd..fa92dd94eb6a 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -75,6 +75,7 
@@ from tests.server import ( CustomHeaderType, FakeChannel, + ThreadedMemoryReactorClock, get_clock, make_request, setup_test_homeserver, @@ -360,7 +361,7 @@ def wait_for_background_updates(self) -> None: store.db_pool.updates.do_next_background_update(False), by=0.1 ) - def make_homeserver(self, reactor: MemoryReactor, clock: Clock): + def make_homeserver(self, reactor: ThreadedMemoryReactorClock, clock: Clock): """ Make and return a homeserver. From cf66d712c615b96bce19e44118cce1ebda41d0b8 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 26 Jan 2023 10:38:49 +0000 Subject: [PATCH 074/344] Fix initialization of `_device_list_id_gen` (#14914) On startup, the `_device_list_id_gen` stream id generator is initialized using the maximum stream id seen in a list of tables. When we started populating the `device_list_remote_pending` table in #13913, we forgot to add it to the aforementioned list of tables, so the stream id generator can hand out old stream ids after a restart. The end result is that Synapse can fail to handle device list update EDUs after a restart when a partial state join is in progress. Add the `device_list_remote_pending` table to the list of tables to consider when initializing the `_device_list_id_gen` stream id generator. Signed-off-by: Sean Quah --- changelog.d/14914.bugfix | 1 + synapse/storage/databases/main/devices.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/14914.bugfix diff --git a/changelog.d/14914.bugfix b/changelog.d/14914.bugfix new file mode 100644 index 000000000000..af73cca70f6e --- /dev/null +++ b/changelog.d/14914.bugfix @@ -0,0 +1 @@ +Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 903606fb4664..e8b6cc6b8014 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -99,6 +99,7 @@ def __init__( ("user_signature_stream", "stream_id"), ("device_lists_outbound_pokes", "stream_id"), ("device_lists_changes_in_room", "stream_id"), + ("device_lists_remote_pending", "stream_id"), ], is_writer=hs.config.worker.worker_app is None, ) From dc901a885f9a7111c97b2935d51d2d05a26db47b Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 26 Jan 2023 13:27:27 +0000 Subject: [PATCH 075/344] Fix typo in release script (#14920) * Fix typo in release script * Changelog --- changelog.d/14920.misc | 1 + scripts-dev/release.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14920.misc diff --git a/changelog.d/14920.misc b/changelog.d/14920.misc new file mode 100644 index 000000000000..7988897d3933 --- /dev/null +++ b/changelog.d/14920.misc @@ -0,0 +1 @@ +Fix typo in release script. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 6974fd789575..008a5bd965d4 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -438,7 +438,7 @@ def _upload(gh_token: Optional[str]) -> None: repo = get_repo_and_check_clean_checkout() tag = repo.tag(f"refs/tags/{tag_name}") if repo.head.commit != tag.commit: - click.echo("Tag {tag_name} (tag.commit) is not currently checked out!") + click.echo(f"Tag {tag_name} ({tag.commit}) is not currently checked out!") click.get_current_context().abort() # Query all the assets corresponding to this release. 
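The `_device_list_id_gen` fix above (#14914) hinges on how the generator is seeded: it takes the maximum stream id across a fixed list of (table, column) pairs, so a table that also records stream ids but is missing from that list can leave the generator behind what is already on disk. A rough, self-contained sketch of that seeding step (plain sqlite3 rather than Synapse's `StreamIdGenerator`; the schema below is illustrative only):

```python
import sqlite3
from typing import Sequence, Tuple


def seed_stream_id(conn: sqlite3.Connection, tables: Sequence[Tuple[str, str]]) -> int:
    # Start the generator at the largest stream id found in any listed table.
    # Table/column names are fixed identifiers here, so f-string interpolation is safe.
    current = 1
    for table, column in tables:
        row = conn.execute(f"SELECT COALESCE(MAX({column}), 1) FROM {table}").fetchone()
        current = max(current, row[0])
    return current


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE device_lists_outbound_pokes (stream_id INTEGER)")
conn.execute("CREATE TABLE device_lists_remote_pending (stream_id INTEGER)")
conn.execute("INSERT INTO device_lists_remote_pending VALUES (7)")

# Omitting device_lists_remote_pending (the pre-fix behaviour) seeds the generator
# at 1 even though stream id 7 is already persisted, so old ids get handed out again.
print(seed_stream_id(conn, [("device_lists_outbound_pokes", "stream_id")]))  # 1
print(seed_stream_id(conn, [
    ("device_lists_outbound_pokes", "stream_id"),
    ("device_lists_remote_pending", "stream_id"),
]))  # 7
```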
From 8a05d5de21888cdd0b53870fead3a1eae64f0b17 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Jan 2023 12:15:36 -0500 Subject: [PATCH 076/344] Batch look-ups to see if rooms are partial stated. (#14917) * Batch look-ups to see if rooms are partial stated. * Fix issues found in linting. * Fix typo. * Apply suggestions from code review Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Clarify comments. Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Also improve the cache size while we're at it * is_partial_state_rooms -> is_partial_state_room_batched * Run `black` * Improve annotation for `simple_select_many_batch` * Fix is_partial_state_room_batched impl * Okay, _actually_ fix impl * Update description. * Update synapse/storage/databases/main/room.py Co-authored-by: Patrick Cloke * Run black. Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> Co-authored-by: David Robertson --- changelog.d/14917.misc | 1 + synapse/handlers/sync.py | 24 ++++++++++++++++------- synapse/storage/database.py | 2 +- synapse/storage/databases/main/room.py | 27 +++++++++++++++++++++++--- 4 files changed, 43 insertions(+), 11 deletions(-) create mode 100644 changelog.d/14917.misc diff --git a/changelog.d/14917.misc b/changelog.d/14917.misc new file mode 100644 index 000000000000..4d1dd2639a10 --- /dev/null +++ b/changelog.d/14917.misc @@ -0,0 +1 @@ +Faster joins: Improve performance of looking up partial-state status of rooms. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ee117645673f..5ebd3ea855c2 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1383,16 +1383,21 @@ async def generate_sync_result( if not sync_config.filter_collection.lazy_load_members(): # Non-lazy syncs should never include partially stated rooms. # Exclude all partially stated rooms from this sync. - for room_id in mutable_joined_room_ids: - if await self.store.is_partial_state_room(room_id): - mutable_rooms_to_exclude.add(room_id) + results = await self.store.is_partial_state_room_batched( + mutable_joined_room_ids + ) + mutable_rooms_to_exclude.update( + room_id + for room_id, is_partial_state in results.items() + if is_partial_state + ) # Incremental eager syncs should additionally include rooms that # - we are joined to # - are full-stated # - became fully-stated at some point during the sync period # (These rooms will have been omitted during a previous eager sync.) 
- forced_newly_joined_room_ids = set() + forced_newly_joined_room_ids: Set[str] = set() if since_token and not sync_config.filter_collection.lazy_load_members(): un_partial_stated_rooms = ( await self.store.get_un_partial_stated_rooms_between( @@ -1401,9 +1406,14 @@ async def generate_sync_result( mutable_joined_room_ids, ) ) - for room_id in un_partial_stated_rooms: - if not await self.store.is_partial_state_room(room_id): - forced_newly_joined_room_ids.add(room_id) + results = await self.store.is_partial_state_room_batched( + un_partial_stated_rooms + ) + forced_newly_joined_room_ids.update( + room_id + for room_id, is_partial_state in results.items() + if not is_partial_state + ) # Now we have our list of joined room IDs, exclude as configured and freeze joined_room_ids = frozenset( diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 88479a16db0c..e20c5c5302fa 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -1819,7 +1819,7 @@ async def simple_select_many_batch( keyvalues: Optional[Dict[str, Any]] = None, desc: str = "simple_select_many_batch", batch_size: int = 100, - ) -> List[Any]: + ) -> List[Dict[str, Any]]: """Executes a SELECT query on the named table, which may return zero or more rows, returning the result as a list of dicts. diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 3aa7b945606e..fbbc01888701 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -60,9 +60,9 @@ MultiWriterIdGenerator, StreamIdGenerator, ) -from synapse.types import JsonDict, RetentionPolicy, ThirdPartyInstanceID +from synapse.types import JsonDict, RetentionPolicy, StrCollection, ThirdPartyInstanceID from synapse.util import json_encoder -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import cached, cachedList from synapse.util.stringutils import MXC_REGEX if TYPE_CHECKING: @@ -1255,7 +1255,7 @@ async def get_partial_state_room_resync_info( return room_servers - @cached() + @cached(max_entries=10000) async def is_partial_state_room(self, room_id: str) -> bool: """Checks if this room has partial state. @@ -1274,6 +1274,27 @@ async def is_partial_state_room(self, room_id: str) -> bool: return entry is not None + @cachedList(cached_method_name="is_partial_state_room", list_name="room_ids") + async def is_partial_state_room_batched( + self, room_ids: StrCollection + ) -> Mapping[str, bool]: + """Checks if the given rooms have partial state. + + Returns true for "partial-state" rooms, which means that the state + at events in the room, and `current_state_events`, may not yet be + complete. + """ + + rows: List[Dict[str, str]] = await self.db_pool.simple_select_many_batch( + table="partial_state_rooms", + column="room_id", + iterable=room_ids, + retcols=("room_id",), + desc="is_partial_state_room_batched", + ) + partial_state_rooms = {row_dict["room_id"] for row_dict in rows} + return {room_id: room_id in partial_state_rooms for room_id in room_ids} + async def get_join_event_id_and_device_lists_stream_id_for_partial_state( self, room_id: str ) -> Tuple[str, int]: From ba79fb4a61784f4b5613da795a61f430af053ca6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Jan 2023 12:31:58 -0500 Subject: [PATCH 077/344] Use StrCollection in place of Collection[str] in (most) handlers code. (#14922) Due to the increased safety of StrCollection over Collection[str] and Sequence[str]. 
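The batched lookup added in #14917 above replaces per-room `is_partial_state_room` calls (one query each) with a single query over the whole batch of room ids, which is the shape `simple_select_many_batch` produces; the real method additionally layers `@cachedList` caching on top. A rough standalone sketch of that query shape, using sqlite3 directly and returning the same `room_id -> bool` mapping:

```python
import sqlite3
from typing import Dict, Sequence


def is_partial_state_room_batched(
    conn: sqlite3.Connection, room_ids: Sequence[str]
) -> Dict[str, bool]:
    # One round trip for the whole batch instead of one query per room.
    if not room_ids:
        return {}
    placeholders = ", ".join("?" for _ in room_ids)
    rows = conn.execute(
        f"SELECT room_id FROM partial_state_rooms WHERE room_id IN ({placeholders})",
        list(room_ids),
    ).fetchall()
    partial = {row[0] for row in rows}
    return {room_id: room_id in partial for room_id in room_ids}


conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE partial_state_rooms (room_id TEXT)")
conn.execute("INSERT INTO partial_state_rooms VALUES ('!partial:example.com')")
print(is_partial_state_room_batched(conn, ["!partial:example.com", "!full:example.com"]))
# {'!partial:example.com': True, '!full:example.com': False}
```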
--- changelog.d/14922.misc | 1 + synapse/handlers/account_data.py | 6 +++--- synapse/handlers/device.py | 6 +++--- synapse/handlers/event_auth.py | 8 ++++---- synapse/handlers/federation.py | 26 ++++++++------------------ synapse/handlers/federation_event.py | 5 +++-- synapse/handlers/pagination.py | 6 +++--- synapse/handlers/room.py | 14 +++----------- synapse/handlers/room_summary.py | 4 ++-- synapse/handlers/search.py | 8 ++++---- synapse/handlers/sso.py | 9 +++++---- synapse/handlers/sync.py | 4 ++-- synapse/rest/client/push_rule.py | 4 ++-- 13 files changed, 43 insertions(+), 58 deletions(-) create mode 100644 changelog.d/14922.misc diff --git a/changelog.d/14922.misc b/changelog.d/14922.misc new file mode 100644 index 000000000000..2cc3614dfded --- /dev/null +++ b/changelog.d/14922.misc @@ -0,0 +1 @@ +Use `StrCollection` to avoid potential bugs with `Collection[str]`. diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 834006356a28..d500b21809fa 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -14,7 +14,7 @@ # limitations under the License. import logging import random -from typing import TYPE_CHECKING, Awaitable, Callable, Collection, List, Optional, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple from synapse.api.constants import AccountDataTypes from synapse.replication.http.account_data import ( @@ -26,7 +26,7 @@ ReplicationRemoveUserAccountDataRestServlet, ) from synapse.streams import EventSource -from synapse.types import JsonDict, StreamKeyType, UserID +from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -322,7 +322,7 @@ async def get_new_events( user: UserID, from_key: int, limit: int, - room_ids: Collection[str], + room_ids: StrCollection, is_guest: bool, explicit_room_id: Optional[str] = None, ) -> Tuple[List[JsonDict], int]: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 58180ae2faba..5c0607390137 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -18,7 +18,6 @@ from typing import ( TYPE_CHECKING, Any, - Collection, Dict, Iterable, List, @@ -45,6 +44,7 @@ ) from synapse.types import ( JsonDict, + StrCollection, StreamKeyType, StreamToken, UserID, @@ -146,7 +146,7 @@ async def get_device(self, user_id: str, device_id: str) -> JsonDict: @cancellable async def get_device_changes_in_shared_rooms( - self, user_id: str, room_ids: Collection[str], from_token: StreamToken + self, user_id: str, room_ids: StrCollection, from_token: StreamToken ) -> Set[str]: """Get the set of users whose devices have changed who share a room with the given user. @@ -551,7 +551,7 @@ async def update_device(self, user_id: str, device_id: str, content: dict) -> No @trace @measure_func("notify_device_update") async def notify_device_update( - self, user_id: str, device_ids: Collection[str] + self, user_id: str, device_ids: StrCollection ) -> None: """Notify that a user's device(s) has changed. Pokes the notifier, and remote servers if the user is local. diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index f91dbbecb79c..a23a8ce2a167 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Collection, List, Mapping, Optional, Union +from typing import TYPE_CHECKING, List, Mapping, Optional, Union from synapse import event_auth from synapse.api.constants import ( @@ -29,7 +29,7 @@ ) from synapse.events import EventBase from synapse.events.builder import EventBuilder -from synapse.types import StateMap, get_domain_from_id +from synapse.types import StateMap, StrCollection, get_domain_from_id if TYPE_CHECKING: from synapse.server import HomeServer @@ -290,7 +290,7 @@ async def has_restricted_join_rules( async def get_rooms_that_allow_join( self, state_ids: StateMap[str] - ) -> Collection[str]: + ) -> StrCollection: """ Generate a list of rooms in which membership allows access to a room. @@ -331,7 +331,7 @@ async def get_rooms_that_allow_join( return result - async def is_user_in_rooms(self, room_ids: Collection[str], user_id: str) -> bool: + async def is_user_in_rooms(self, room_ids: StrCollection, user_id: str) -> bool: """ Check whether a user is a member of any of the provided rooms. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 233f8c113d19..dc1cbf5c3d13 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -20,17 +20,7 @@ import logging from enum import Enum from http import HTTPStatus -from typing import ( - TYPE_CHECKING, - Collection, - Dict, - Iterable, - List, - Optional, - Set, - Tuple, - Union, -) +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, Union import attr from prometheus_client import Histogram @@ -70,7 +60,7 @@ ) from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour -from synapse.types import JsonDict, get_domain_from_id +from synapse.types import JsonDict, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.retryutils import NotRetryingDestination @@ -179,7 +169,7 @@ def __init__(self, hs: "HomeServer"): # A dictionary mapping room IDs to (initial destination, other destinations) # tuples. self._partial_state_syncs_maybe_needing_restart: Dict[ - str, Tuple[Optional[str], Collection[str]] + str, Tuple[Optional[str], StrCollection] ] = {} # A lock guarding the partial state flag for rooms. # When the lock is held for a given room, no other concurrent code may @@ -437,7 +427,7 @@ async def _maybe_backfill_inner( ) ) - async def try_backfill(domains: Collection[str]) -> bool: + async def try_backfill(domains: StrCollection) -> bool: # TODO: Should we try multiple of these at a time? 
# Number of contacted remote homeservers that have denied our backfill @@ -1730,7 +1720,7 @@ async def _resume_partial_state_room_sync(self) -> None: def _start_partial_state_room_sync( self, initial_destination: Optional[str], - other_destinations: Collection[str], + other_destinations: StrCollection, room_id: str, ) -> None: """Starts the background process to resync the state of a partial state room, @@ -1812,7 +1802,7 @@ async def _sync_partial_state_room_wrapper() -> None: async def _sync_partial_state_room( self, initial_destination: Optional[str], - other_destinations: Collection[str], + other_destinations: StrCollection, room_id: str, ) -> None: """Background process to resync the state of a partial-state room @@ -1949,9 +1939,9 @@ async def _sync_partial_state_room( def _prioritise_destinations_for_partial_state_resync( initial_destination: Optional[str], - other_destinations: Collection[str], + other_destinations: StrCollection, room_id: str, -) -> Collection[str]: +) -> StrCollection: """Work out the order in which we should ask servers to resync events. If an `initial_destination` is given, it takes top priority. Otherwise diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 904a721483c9..e037acbca2bf 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -80,6 +80,7 @@ PersistedEventPosition, RoomStreamToken, StateMap, + StrCollection, UserID, get_domain_from_id, ) @@ -615,7 +616,7 @@ async def update_state_for_partial_state_event( @trace async def backfill( - self, dest: str, room_id: str, limit: int, extremities: Collection[str] + self, dest: str, room_id: str, limit: int, extremities: StrCollection ) -> None: """Trigger a backfill request to `dest` for the given `room_id` @@ -1565,7 +1566,7 @@ async def backfill_event_id( @trace @tag_args async def _get_events_and_persist( - self, destination: str, room_id: str, event_ids: Collection[str] + self, destination: str, room_id: str, event_ids: StrCollection ) -> None: """Fetch the given events from a server, and persist them as outliers. diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 8c8ff18a1a61..1fe656718573 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Collection, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Dict, List, Optional, Set import attr @@ -28,7 +28,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.rest.admin._base import assert_user_is_admin from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, Requester, StreamKeyType +from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType from synapse.types.state import StateFilter from synapse.util.async_helpers import ReadWriteLock from synapse.util.stringutils import random_string @@ -391,7 +391,7 @@ def get_delete_status(self, delete_id: str) -> Optional[DeleteStatus]: """ return self._delete_by_id.get(delete_id) - def get_delete_ids_by_room(self, room_id: str) -> Optional[Collection[str]]: + def get_delete_ids_by_room(self, room_id: str) -> Optional[StrCollection]: """Get all active delete ids by room Args: diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 572c7b4db344..60a6d9cf3c18 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -20,16 +20,7 @@ import string from collections import OrderedDict from http import HTTPStatus -from typing import ( - TYPE_CHECKING, - Any, - Awaitable, - Collection, - Dict, - List, - Optional, - Tuple, -) +from typing import TYPE_CHECKING, Any, Awaitable, Dict, List, Optional, Tuple import attr from typing_extensions import TypedDict @@ -72,6 +63,7 @@ RoomID, RoomStreamToken, StateMap, + StrCollection, StreamKeyType, StreamToken, UserID, @@ -1644,7 +1636,7 @@ async def get_new_events( user: UserID, from_key: RoomStreamToken, limit: int, - room_ids: Collection[str], + room_ids: StrCollection, is_guest: bool, explicit_room_id: Optional[str] = None, ) -> Tuple[List[EventBase], RoomStreamToken]: diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index c6b869c6f44e..4472019fbcd2 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -36,7 +36,7 @@ ) from synapse.api.ratelimiting import Ratelimiter from synapse.events import EventBase -from synapse.types import JsonDict, Requester +from synapse.types import JsonDict, Requester, StrCollection from synapse.util.caches.response_cache import ResponseCache if TYPE_CHECKING: @@ -870,7 +870,7 @@ class _RoomQueueEntry: # The room ID of this entry. room_id: str # The server to query if the room is not known locally. - via: Sequence[str] + via: StrCollection # The minimum number of hops necessary to get to this room (compared to the # originally requested room). 
depth: int = 0 diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 40f4635c4e29..9bbf83047dd6 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -14,7 +14,7 @@ import itertools import logging -from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple import attr from unpaddedbase64 import decode_base64, encode_base64 @@ -23,7 +23,7 @@ from synapse.api.errors import NotFoundError, SynapseError from synapse.api.filtering import Filter from synapse.events import EventBase -from synapse.types import JsonDict, StreamKeyType, UserID +from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID from synapse.types.state import StateFilter from synapse.visibility import filter_events_for_client @@ -418,7 +418,7 @@ async def _search( async def _search_by_rank( self, user: UserID, - room_ids: Collection[str], + room_ids: StrCollection, search_term: str, keys: Iterable[str], search_filter: Filter, @@ -491,7 +491,7 @@ async def _search_by_rank( async def _search_by_recent( self, user: UserID, - room_ids: Collection[str], + room_ids: StrCollection, search_term: str, keys: Iterable[str], search_filter: Filter, diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 44e70fc4b874..4a27c0f05142 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -20,7 +20,6 @@ Any, Awaitable, Callable, - Collection, Dict, Iterable, List, @@ -47,6 +46,7 @@ from synapse.http.site import SynapseRequest from synapse.types import ( JsonDict, + StrCollection, UserID, contains_invalid_mxid_characters, create_requester, @@ -141,7 +141,8 @@ class UserAttributes: confirm_localpart: bool = False display_name: Optional[str] = None picture: Optional[str] = None - emails: Collection[str] = attr.Factory(list) + # mypy thinks these are incompatible for some reason. + emails: StrCollection = attr.Factory(list) # type: ignore[assignment] @attr.s(slots=True, auto_attribs=True) @@ -159,7 +160,7 @@ class UsernameMappingSession: # attributes returned by the ID mapper display_name: Optional[str] - emails: Collection[str] + emails: StrCollection # An optional dictionary of extra attributes to be provided to the client in the # login response. 
@@ -174,7 +175,7 @@ class UsernameMappingSession: # choices made by the user chosen_localpart: Optional[str] = None use_display_name: bool = True - emails_to_use: Collection[str] = () + emails_to_use: StrCollection = () terms_accepted_version: Optional[str] = None diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index ee117645673f..9e9601d423ce 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -17,7 +17,6 @@ TYPE_CHECKING, AbstractSet, Any, - Collection, Dict, FrozenSet, List, @@ -62,6 +61,7 @@ Requester, RoomStreamToken, StateMap, + StrCollection, StreamKeyType, StreamToken, UserID, @@ -1179,7 +1179,7 @@ async def compute_state_delta( async def _find_missing_partial_state_memberships( self, room_id: str, - members_to_fetch: Collection[str], + members_to_fetch: StrCollection, events_with_membership_auth: Mapping[str, EventBase], found_state_ids: StateMap[str], ) -> StateMap[str]: diff --git a/synapse/rest/client/push_rule.py b/synapse/rest/client/push_rule.py index 8191b4e32c34..ad5c10c99dd8 100644 --- a/synapse/rest/client/push_rule.py +++ b/synapse/rest/client/push_rule.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import TYPE_CHECKING, List, Sequence, Tuple, Union +from typing import TYPE_CHECKING, List, Tuple, Union from synapse.api.errors import ( NotFoundError, @@ -169,7 +169,7 @@ async def on_GET(self, request: SynapseRequest, path: str) -> Tuple[int, JsonDic raise UnrecognizedRequestError() -def _rule_spec_from_path(path: Sequence[str]) -> RuleSpec: +def _rule_spec_from_path(path: List[str]) -> RuleSpec: """Turn a sequence of path components into a rule spec Args: From 345576bc349f2c96b273bea246a5bb44c705c6ec Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Jan 2023 13:24:15 -0500 Subject: [PATCH 078/344] Fix paginating /relations with a live token (#14866) The `/relations` endpoint was not properly handle "live tokens" (i.e sync tokens), to do this properly we abstract the code that `/messages` has and re-use it. --- changelog.d/14866.bugfix | 1 + synapse/storage/databases/main/relations.py | 38 +++-- synapse/storage/databases/main/stream.py | 154 +++++++++++++------- 3 files changed, 123 insertions(+), 70 deletions(-) create mode 100644 changelog.d/14866.bugfix diff --git a/changelog.d/14866.bugfix b/changelog.d/14866.bugfix new file mode 100644 index 000000000000..540f918cbd10 --- /dev/null +++ b/changelog.d/14866.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. 
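(Sketch of the idea only, not the code itself: a `next_batch` token from `/sync` carries just a stream position and no topological part, so the old path, which always called `as_historical_tuple()`, could not handle it. Condensed, the shared helper added to `stream.py` below picks the starting bound for backwards pagination like this:)

    from typing import Optional, Tuple

    def from_bound_backwards(from_token) -> Tuple[Optional[int], int]:
        # from_token is a RoomStreamToken, as in the diff below.
        if from_token.topological is not None:
            # Historical token, e.g. from /messages: a (topological, stream) pair.
            return from_token.as_historical_tuple()
        # Live token, e.g. next_batch from /sync: fall back to a stream-only bound.
        return (None, from_token.get_max_stream_pos())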
diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 84f844b79e7f..be2242b6aca3 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -40,9 +40,13 @@ LoggingTransaction, make_in_list_sql_clause, ) -from synapse.storage.databases.main.stream import generate_pagination_where_clause +from synapse.storage.databases.main.stream import ( + generate_next_token, + generate_pagination_bounds, + generate_pagination_where_clause, +) from synapse.storage.engines import PostgresEngine -from synapse.types import JsonDict, RoomStreamToken, StreamKeyType, StreamToken +from synapse.types import JsonDict, StreamKeyType, StreamToken from synapse.util.caches.descriptors import cached, cachedList if TYPE_CHECKING: @@ -207,24 +211,23 @@ async def get_relations_for_event( where_clause.append("type = ?") where_args.append(event_type) + order, from_bound, to_bound = generate_pagination_bounds( + direction, + from_token.room_key if from_token else None, + to_token.room_key if to_token else None, + ) + pagination_clause = generate_pagination_where_clause( direction=direction, column_names=("topological_ordering", "stream_ordering"), - from_token=from_token.room_key.as_historical_tuple() - if from_token - else None, - to_token=to_token.room_key.as_historical_tuple() if to_token else None, + from_token=from_bound, + to_token=to_bound, engine=self.database_engine, ) if pagination_clause: where_clause.append(pagination_clause) - if direction == "b": - order = "DESC" - else: - order = "ASC" - sql = """ SELECT event_id, relation_type, sender, topological_ordering, stream_ordering FROM event_relations @@ -266,16 +269,9 @@ def _get_recent_references_for_event_txn( topo_orderings = topo_orderings[:limit] stream_orderings = stream_orderings[:limit] - topo = topo_orderings[-1] - token = stream_orderings[-1] - if direction == "b": - # Tokens are positions between events. - # This token points *after* the last event in the chunk. - # We need it to point to the event before it in the chunk - # when we are going backwards so we subtract one from the - # stream part. - token -= 1 - next_key = RoomStreamToken(topo, token) + next_key = generate_next_token( + direction, topo_orderings[-1], stream_orderings[-1] + ) if from_token: next_token = from_token.copy_and_replace( diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index d28fc65df9bb..8977bf33e786 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -170,6 +170,104 @@ def generate_pagination_where_clause( return " AND ".join(where_clause) +def generate_pagination_bounds( + direction: str, + from_token: Optional[RoomStreamToken], + to_token: Optional[RoomStreamToken], +) -> Tuple[ + str, Optional[Tuple[Optional[int], int]], Optional[Tuple[Optional[int], int]] +]: + """ + Generate a start and end point for this page of events. + + Args: + direction: Whether pagination is going forwards or backwards. One of "f" or "b". + from_token: The token to start pagination at, or None to start at the first value. + to_token: The token to end pagination at, or None to not limit the end point. + + Returns: + A three tuple of: + + ASC or DESC for sorting of the query. + + The starting position as a tuple of ints representing + (topological position, stream position) or None if no from_token was + provided. The topological position may be None for live tokens. 
+ + The end position in the same format as the starting position, or None + if no to_token was provided. + """ + + # Tokens really represent positions between elements, but we use + # the convention of pointing to the event before the gap. Hence + # we have a bit of asymmetry when it comes to equalities. + if direction == "b": + order = "DESC" + else: + order = "ASC" + + # The bounds for the stream tokens are complicated by the fact + # that we need to handle the instance_map part of the tokens. We do this + # by fetching all events between the min stream token and the maximum + # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and + # then filtering the results. + from_bound: Optional[Tuple[Optional[int], int]] = None + if from_token: + if from_token.topological is not None: + from_bound = from_token.as_historical_tuple() + elif direction == "b": + from_bound = ( + None, + from_token.get_max_stream_pos(), + ) + else: + from_bound = ( + None, + from_token.stream, + ) + + to_bound: Optional[Tuple[Optional[int], int]] = None + if to_token: + if to_token.topological is not None: + to_bound = to_token.as_historical_tuple() + elif direction == "b": + to_bound = ( + None, + to_token.stream, + ) + else: + to_bound = ( + None, + to_token.get_max_stream_pos(), + ) + + return order, from_bound, to_bound + + +def generate_next_token( + direction: str, last_topo_ordering: int, last_stream_ordering: int +) -> RoomStreamToken: + """ + Generate the next room stream token based on the currently returned data. + + Args: + direction: Whether pagination is going forwards or backwards. One of "f" or "b". + last_topo_ordering: The last topological ordering being returned. + last_stream_ordering: The last stream ordering being returned. + + Returns: + A new RoomStreamToken to return to the client. + """ + if direction == "b": + # Tokens are positions between events. + # This token points *after* the last event in the chunk. + # We need it to point to the event before it in the chunk + # when we are going backwards so we subtract one from the + # stream part. + last_stream_ordering -= 1 + return RoomStreamToken(last_topo_ordering, last_stream_ordering) + + def _make_generic_sql_bound( bound: str, column_names: Tuple[str, str], @@ -1300,47 +1398,11 @@ def _paginate_room_events_txn( `to_token`), or `limit` is zero. """ - # Tokens really represent positions between elements, but we use - # the convention of pointing to the event before the gap. Hence - # we have a bit of asymmetry when it comes to equalities. args = [False, room_id] - if direction == "b": - order = "DESC" - else: - order = "ASC" - - # The bounds for the stream tokens are complicated by the fact - # that we need to handle the instance_map part of the tokens. We do this - # by fetching all events between the min stream token and the maximum - # stream token (as returned by `RoomStreamToken.get_max_stream_pos`) and - # then filtering the results. 
- if from_token.topological is not None: - from_bound: Tuple[Optional[int], int] = from_token.as_historical_tuple() - elif direction == "b": - from_bound = ( - None, - from_token.get_max_stream_pos(), - ) - else: - from_bound = ( - None, - from_token.stream, - ) - to_bound: Optional[Tuple[Optional[int], int]] = None - if to_token: - if to_token.topological is not None: - to_bound = to_token.as_historical_tuple() - elif direction == "b": - to_bound = ( - None, - to_token.stream, - ) - else: - to_bound = ( - None, - to_token.get_max_stream_pos(), - ) + order, from_bound, to_bound = generate_pagination_bounds( + direction, from_token, to_token + ) bounds = generate_pagination_where_clause( direction=direction, @@ -1436,16 +1498,10 @@ def _paginate_room_events_txn( ][:limit] if rows: - topo = rows[-1].topological_ordering - token = rows[-1].stream_ordering - if direction == "b": - # Tokens are positions between events. - # This token points *after* the last event in the chunk. - # We need it to point to the event before it in the chunk - # when we are going backwards so we subtract one from the - # stream part. - token -= 1 - next_token = RoomStreamToken(topo, token) + assert rows[-1].topological_ordering is not None + next_token = generate_next_token( + direction, rows[-1].topological_ordering, rows[-1].stream_ordering + ) else: # TODO (erikj): We should work out what to do here instead. next_token = to_token if to_token else from_token From fc35e0673f5b46ea0f5e53ef15626b14a452ca82 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 26 Jan 2023 14:45:24 -0500 Subject: [PATCH 079/344] Add missing type hints in tests (#14879) * FIx-up type hints in tests.logging. * Add missing type hints to test_transactions. --- changelog.d/14879.misc | 1 + mypy.ini | 6 ++-- synapse/rest/client/transactions.py | 3 +- tests/logging/__init__.py | 6 ++-- tests/logging/test_opentracing.py | 4 +-- tests/logging/test_remote_handler.py | 25 ++++++++++----- tests/logging/test_terse_json.py | 30 ++++++++++-------- tests/rest/client/test_transactions.py | 42 +++++++++++++++++--------- 8 files changed, 75 insertions(+), 42 deletions(-) create mode 100644 changelog.d/14879.misc diff --git a/changelog.d/14879.misc b/changelog.d/14879.misc new file mode 100644 index 000000000000..d44571b73149 --- /dev/null +++ b/changelog.d/14879.misc @@ -0,0 +1 @@ +Add missing type hints. 
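(Illustrative only: with `disallow_untyped_defs = True`, mypy rejects any unannotated function, which is why the test methods below gain parameter and return annotations. Roughly:)

    class ExampleTestCase:
        def test_log_output(self):  # error: Function is missing a type annotation
            ...

        def test_log_output_typed(self) -> None:  # accepted once annotated
            ...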
diff --git a/mypy.ini b/mypy.ini index e57bc6426156..978d92940b20 100644 --- a/mypy.ini +++ b/mypy.ini @@ -40,10 +40,7 @@ exclude = (?x) |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py |tests/http/test_proxyagent.py - |tests/logging/__init__.py - |tests/logging/test_terse_json.py |tests/module_api/test_api.py - |tests/rest/client/test_transactions.py |tests/rest/media/v1/test_media_storage.py |tests/server.py |tests/test_state.py @@ -92,6 +89,9 @@ disallow_untyped_defs = True [mypy-tests.handlers.*] disallow_untyped_defs = True +[mypy-tests.logging.*] +disallow_untyped_defs = True + [mypy-tests.metrics.*] disallow_untyped_defs = True diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 61375651bc15..3f40f1874a8b 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -19,6 +19,7 @@ from typing_extensions import ParamSpec +from twisted.internet.defer import Deferred from twisted.python.failure import Failure from twisted.web.server import Request @@ -90,7 +91,7 @@ def fetch_or_execute( fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], *args: P.args, **kwargs: P.kwargs, - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> "Deferred[Tuple[int, JsonDict]]": """Fetches the response for this transaction, or executes the given function to produce a response for this transaction. diff --git a/tests/logging/__init__.py b/tests/logging/__init__.py index 1acf5666a856..1c5de95a809f 100644 --- a/tests/logging/__init__.py +++ b/tests/logging/__init__.py @@ -13,9 +13,11 @@ # limitations under the License. import logging +from tests.unittest import TestCase -class LoggerCleanupMixin: - def get_logger(self, handler): + +class LoggerCleanupMixin(TestCase): + def get_logger(self, handler: logging.Handler) -> logging.Logger: """ Attach a handler to a logger and add clean-ups to remove revert this. """ diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 0917e478a5b0..e28ba84cc2b7 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -153,7 +153,7 @@ def test_overlapping_spans(self) -> None: scopes = [] - async def task(i: int): + async def task(i: int) -> None: scope = start_active_span( f"task{i}", tracer=self._tracer, @@ -165,7 +165,7 @@ async def task(i: int): self.assertEqual(self._tracer.active_span, scope.span) scope.close() - async def root(): + async def root() -> None: with start_active_span("root span", tracer=self._tracer) as root_scope: self.assertEqual(self._tracer.active_span, root_scope.span) scopes.append(root_scope) diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index b0d046fe0079..c08954d887cb 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -11,7 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from twisted.test.proto_helpers import AccumulatingProtocol +from typing import Tuple + +from twisted.internet.protocol import Protocol +from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock from synapse.logging import RemoteHandler @@ -20,7 +23,9 @@ from tests.unittest import TestCase -def connect_logging_client(reactor, client_id): +def connect_logging_client( + reactor: MemoryReactorClock, client_id: int +) -> Tuple[Protocol, AccumulatingProtocol]: # This is essentially tests.server.connect_client, but disabling autoflush on # the client transport. This is necessary to avoid an infinite loop due to # sending of data via the logging transport causing additional logs to be @@ -35,10 +40,10 @@ def connect_logging_client(reactor, client_id): class RemoteHandlerTestCase(LoggerCleanupMixin, TestCase): - def setUp(self): + def setUp(self) -> None: self.reactor, _ = get_clock() - def test_log_output(self): + def test_log_output(self) -> None: """ The remote handler delivers logs over TCP. """ @@ -51,6 +56,7 @@ def test_log_output(self): client, server = connect_logging_client(self.reactor, 0) # Trigger data being sent + assert isinstance(client.transport, FakeTransport) client.transport.flush() # One log message, with a single trailing newline @@ -61,7 +67,7 @@ def test_log_output(self): # Ensure the data passed through properly. self.assertEqual(logs[0], "Hello there, wally!") - def test_log_backpressure_debug(self): + def test_log_backpressure_debug(self) -> None: """ When backpressure is hit, DEBUG logs will be shed. """ @@ -83,6 +89,7 @@ def test_log_backpressure_debug(self): # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) + assert isinstance(client.transport, FakeTransport) client.transport.flush() # Only the 7 infos made it through, the debugs were elided @@ -90,7 +97,7 @@ def test_log_backpressure_debug(self): self.assertEqual(len(logs), 7) self.assertNotIn(b"debug", server.data) - def test_log_backpressure_info(self): + def test_log_backpressure_info(self) -> None: """ When backpressure is hit, DEBUG and INFO logs will be shed. """ @@ -116,6 +123,7 @@ def test_log_backpressure_info(self): # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) + assert isinstance(client.transport, FakeTransport) client.transport.flush() # The 10 warnings made it through, the debugs and infos were elided @@ -124,7 +132,7 @@ def test_log_backpressure_info(self): self.assertNotIn(b"debug", server.data) self.assertNotIn(b"info", server.data) - def test_log_backpressure_cut_middle(self): + def test_log_backpressure_cut_middle(self) -> None: """ When backpressure is hit, and no more DEBUG and INFOs cannot be culled, it will cut the middle messages out. @@ -140,6 +148,7 @@ def test_log_backpressure_cut_middle(self): # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) + assert isinstance(client.transport, FakeTransport) client.transport.flush() # The first five and last five warnings made it through, the debugs and @@ -151,7 +160,7 @@ def test_log_backpressure_cut_middle(self): logs, ) - def test_cancel_connection(self): + def test_cancel_connection(self) -> None: """ Gracefully handle the connection being cancelled. 
""" diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index 0b0d8737c117..fa27f1279a95 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -14,24 +14,28 @@ import json import logging from io import BytesIO, StringIO +from typing import cast from unittest.mock import Mock, patch +from twisted.web.http import HTTPChannel from twisted.web.server import Request from synapse.http.site import SynapseRequest from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter from synapse.logging.context import LoggingContext, LoggingContextFilter +from synapse.types import JsonDict from tests.logging import LoggerCleanupMixin -from tests.server import FakeChannel +from tests.server import FakeChannel, get_clock from tests.unittest import TestCase class TerseJsonTestCase(LoggerCleanupMixin, TestCase): - def setUp(self): + def setUp(self) -> None: self.output = StringIO() + self.reactor, _ = get_clock() - def get_log_line(self): + def get_log_line(self) -> JsonDict: # One log message, with a single trailing newline. data = self.output.getvalue() logs = data.splitlines() @@ -39,7 +43,7 @@ def get_log_line(self): self.assertEqual(data.count("\n"), 1) return json.loads(logs[0]) - def test_terse_json_output(self): + def test_terse_json_output(self) -> None: """ The Terse JSON formatter converts log messages to JSON. """ @@ -61,7 +65,7 @@ def test_terse_json_output(self): self.assertCountEqual(log.keys(), expected_log_keys) self.assertEqual(log["log"], "Hello there, wally!") - def test_extra_data(self): + def test_extra_data(self) -> None: """ Additional information can be included in the structured logging. """ @@ -93,7 +97,7 @@ def test_extra_data(self): self.assertEqual(log["int"], 3) self.assertIs(log["bool"], True) - def test_json_output(self): + def test_json_output(self) -> None: """ The Terse JSON formatter converts log messages to JSON. """ @@ -114,7 +118,7 @@ def test_json_output(self): self.assertCountEqual(log.keys(), expected_log_keys) self.assertEqual(log["log"], "Hello there, wally!") - def test_with_context(self): + def test_with_context(self) -> None: """ The logging context should be added to the JSON response. """ @@ -139,7 +143,7 @@ def test_with_context(self): self.assertEqual(log["log"], "Hello there, wally!") self.assertEqual(log["request"], "name") - def test_with_request_context(self): + def test_with_request_context(self) -> None: """ Information from the logging context request should be added to the JSON response. """ @@ -154,11 +158,13 @@ def test_with_request_context(self): site.server_version_string = "Server v1" site.reactor = Mock() site.experimental_cors_msc3886 = False - request = SynapseRequest(FakeChannel(site, None), site) + request = SynapseRequest( + cast(HTTPChannel, FakeChannel(site, self.reactor)), site + ) # Call requestReceived to finish instantiating the object. request.content = BytesIO() - # Partially skip some of the internal processing of SynapseRequest. - request._started_processing = Mock() + # Partially skip some internal processing of SynapseRequest. 
+ request._started_processing = Mock() # type: ignore[assignment] request.request_metrics = Mock(spec=["name"]) with patch.object(Request, "render"): request.requestReceived(b"POST", b"/_matrix/client/versions", b"1.1") @@ -200,7 +206,7 @@ def test_with_request_context(self): self.assertEqual(log["protocol"], "1.1") self.assertEqual(log["user_agent"], "") - def test_with_exception(self): + def test_with_exception(self) -> None: """ The logging exception type & value should be added to the JSON response. """ diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 21a1ca2a6885..3086e1b5650b 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -13,18 +13,22 @@ # limitations under the License. from http import HTTPStatus +from typing import Any, Generator, Tuple, cast from unittest.mock import Mock, call -from twisted.internet import defer, reactor +from twisted.internet import defer, reactor as _reactor from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache +from synapse.types import ISynapseReactor, JsonDict from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable from tests.utils import MockClock +reactor = cast(ISynapseReactor, _reactor) + class HttpTransactionCacheTestCase(unittest.TestCase): def setUp(self) -> None: @@ -34,11 +38,13 @@ def setUp(self) -> None: self.hs.get_auth = Mock() self.cache = HttpTransactionCache(self.hs) - self.mock_http_response = (HTTPStatus.OK, "GOOD JOB!") + self.mock_http_response = (HTTPStatus.OK, {"result": "GOOD JOB!"}) self.mock_key = "foo" @defer.inlineCallbacks - def test_executes_given_function(self): + def test_executes_given_function( + self, + ) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) res = yield self.cache.fetch_or_execute( self.mock_key, cb, "some_arg", keyword="arg" @@ -47,7 +53,9 @@ def test_executes_given_function(self): self.assertEqual(res, self.mock_http_response) @defer.inlineCallbacks - def test_deduplicates_based_on_key(self): + def test_deduplicates_based_on_key( + self, + ) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) for i in range(3): # invoke multiple times res = yield self.cache.fetch_or_execute( @@ -58,18 +66,20 @@ def test_deduplicates_based_on_key(self): cb.assert_called_once_with("some_arg", keyword="arg", changing_args=0) @defer.inlineCallbacks - def test_logcontexts_with_async_result(self): + def test_logcontexts_with_async_result( + self, + ) -> Generator["defer.Deferred[Any]", object, None]: @defer.inlineCallbacks - def cb(): + def cb() -> Generator["defer.Deferred[object]", object, Tuple[int, JsonDict]]: yield Clock(reactor).sleep(0) - return "yay" + return 1, {} @defer.inlineCallbacks - def test(): + def test() -> Generator["defer.Deferred[Any]", object, None]: with LoggingContext("c") as c1: res = yield self.cache.fetch_or_execute(self.mock_key, cb) self.assertIs(current_context(), c1) - self.assertEqual(res, "yay") + self.assertEqual(res, (1, {})) # run the test twice in parallel d = defer.gatherResults([test(), test()]) @@ -78,13 +88,15 @@ def test(): self.assertIs(current_context(), SENTINEL_CONTEXT) @defer.inlineCallbacks - def test_does_not_cache_exceptions(self): + def test_does_not_cache_exceptions( + self, + ) -> 
Generator["defer.Deferred[Any]", object, None]: """Checks that, if the callback throws an exception, it is called again for the next request. """ called = [False] - def cb(): + def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": if called[0]: # return a valid result the second time return defer.succeed(self.mock_http_response) @@ -104,13 +116,15 @@ def cb(): self.assertIs(current_context(), test_context) @defer.inlineCallbacks - def test_does_not_cache_failures(self): + def test_does_not_cache_failures( + self, + ) -> Generator["defer.Deferred[Any]", object, None]: """Checks that, if the callback returns a failure, it is called again for the next request. """ called = [False] - def cb(): + def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": if called[0]: # return a valid result the second time return defer.succeed(self.mock_http_response) @@ -130,7 +144,7 @@ def cb(): self.assertIs(current_context(), test_context) @defer.inlineCallbacks - def test_cleans_up(self): + def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") # should NOT have cleaned up yet From 5ef9ff54efec96b08741ed0f250222040b689bb5 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 27 Jan 2023 11:18:36 +0000 Subject: [PATCH 080/344] 1.76.0rc2 --- CHANGES.md | 20 ++++++++++++++++++-- changelog.d/14914.bugfix | 1 - changelog.d/14917.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 25 insertions(+), 5 deletions(-) delete mode 100644 changelog.d/14914.bugfix delete mode 100644 changelog.d/14917.misc diff --git a/CHANGES.md b/CHANGES.md index cc5ee1619015..0edf6c34c938 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,12 +1,28 @@ -Synapse 1.76.0rc1 (2023-01-25) +Synapse 1.76.0rc2 (2023-01-27) ============================== -This release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/upgrade.md#faster-joins-are-enabled-by-default) for more details. +The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/upgrade.md#faster-joins-are-enabled-by-default) for more details. The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams). Those who are `poetry install`ing from source using our lockfile should ensure their poetry version is 1.3.2 or higher; [see upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132). + +Bugfixes +-------- + +- Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. 
([\#14914](https://github.com/matrix-org/synapse/issues/14914)) + + +Internal Changes +---------------- + +- Faster joins: Improve performance of looking up partial-state status of rooms. ([\#14917](https://github.com/matrix-org/synapse/issues/14917)) + + +Synapse 1.76.0rc1 (2023-01-25) +============================== + Features -------- diff --git a/changelog.d/14914.bugfix b/changelog.d/14914.bugfix deleted file mode 100644 index af73cca70f6e..000000000000 --- a/changelog.d/14914.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster joins: Fix a bug introduced in Synapse 1.69 where device list EDUs could fail to be handled after a restart when a faster join sync is in progress. diff --git a/changelog.d/14917.misc b/changelog.d/14917.misc deleted file mode 100644 index 4d1dd2639a10..000000000000 --- a/changelog.d/14917.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: Improve performance of looking up partial-state status of rooms. diff --git a/debian/changelog b/debian/changelog index 05333662955c..26234491f357 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.76.0~rc2) stable; urgency=medium + + * New Synapse release 1.76.0rc2. + + -- Synapse Packaging team Fri, 27 Jan 2023 11:17:57 +0000 + matrix-synapse-py3 (1.76.0~rc1) stable; urgency=medium * Use Poetry 1.3.2 to manage the bundled virtualenv included with this package. diff --git a/pyproject.toml b/pyproject.toml index 0d671d25835b..fd05a2b55004 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.76.0rc1" +version = "1.76.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 265735db9d7b0698a511fc9389db4d6f104f1aa8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 27 Jan 2023 07:27:55 -0500 Subject: [PATCH 081/344] Use an enum for direction. (#14927) For better type safety we use an enum instead of strings to configure direction (backwards or forwards). --- changelog.d/14927.misc | 1 + synapse/api/constants.py | 7 +++ synapse/handlers/admin.py | 4 +- synapse/handlers/initial_sync.py | 16 +++++- synapse/handlers/pagination.py | 6 +-- synapse/handlers/relations.py | 8 ++- synapse/storage/databases/main/relations.py | 8 +-- synapse/storage/databases/main/stream.py | 59 +++++++++++---------- synapse/streams/config.py | 11 ++-- 9 files changed, 76 insertions(+), 44 deletions(-) create mode 100644 changelog.d/14927.misc diff --git a/changelog.d/14927.misc b/changelog.d/14927.misc new file mode 100644 index 000000000000..9f5384e60e78 --- /dev/null +++ b/changelog.d/14927.misc @@ -0,0 +1 @@ +Add missing type hints. 
\ No newline at end of file diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 6432d32d8376..6f9239d21c9b 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -17,6 +17,8 @@ """Contains constants from the specification.""" +import enum + from typing_extensions import Final # the max size of a (canonical-json-encoded) event @@ -290,3 +292,8 @@ class ApprovalNoticeMedium: NONE = "org.matrix.msc3866.none" EMAIL = "org.matrix.msc3866.email" + + +class Direction(enum.Enum): + BACKWARDS = "b" + FORWARDS = "f" diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 5bf8e863875b..c81ea347583d 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -16,7 +16,7 @@ import logging from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set -from synapse.api.constants import Membership +from synapse.api.constants import Direction, Membership from synapse.events import EventBase from synapse.types import JsonDict, RoomStreamToken, StateMap, UserID from synapse.visibility import filter_events_for_client @@ -197,7 +197,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> # efficient method perhaps but it does guarantee we get everything. while True: events, _ = await self.store.paginate_room_events( - room_id, from_key, to_key, limit=100, direction="f" + room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS ) if not events: break diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 8c2260ad7dfa..191529bd8e27 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -15,7 +15,13 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple, cast -from synapse.api.constants import AccountDataTypes, EduTypes, EventTypes, Membership +from synapse.api.constants import ( + AccountDataTypes, + Direction, + EduTypes, + EventTypes, + Membership, +) from synapse.api.errors import SynapseError from synapse.events import EventBase from synapse.events.utils import SerializeEventConfig @@ -57,7 +63,13 @@ def __init__(self, hs: "HomeServer"): self.validator = EventValidator() self.snapshot_cache: ResponseCache[ Tuple[ - str, Optional[StreamToken], Optional[StreamToken], str, int, bool, bool + str, + Optional[StreamToken], + Optional[StreamToken], + Direction, + int, + bool, + bool, ] ] = ResponseCache(hs.get_clock(), "initial_sync_cache") self._event_serializer = hs.get_event_client_serializer() diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 1fe656718573..ceefa16b4981 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -19,7 +19,7 @@ from twisted.python.failure import Failure -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import Direction, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter from synapse.events.utils import SerializeEventConfig @@ -448,7 +448,7 @@ async def get_messages( if pagin_config.from_token: from_token = pagin_config.from_token - elif pagin_config.direction == "f": + elif pagin_config.direction == Direction.FORWARDS: from_token = ( await self.hs.get_event_sources().get_start_token_for_pagination( room_id @@ -476,7 +476,7 @@ async def get_messages( room_id, requester, allow_departed_users=True ) - if pagin_config.direction == "b": + if pagin_config.direction == Direction.BACKWARDS: # if we're going backwards, we might need to backfill. 
This # requires that we have a topo token. if room_token.topological: diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index e96f9999a8d6..0fb15391e07e 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -17,7 +17,7 @@ import attr -from synapse.api.constants import EventTypes, RelationTypes +from synapse.api.constants import Direction, EventTypes, RelationTypes from synapse.api.errors import SynapseError from synapse.events import EventBase, relation_from_event from synapse.logging.context import make_deferred_yieldable, run_in_background @@ -413,7 +413,11 @@ async def _get_threads_for_events( # Attempt to find another event to use as the latest event. potential_events, _ = await self._main_store.get_relations_for_event( - event_id, event, room_id, RelationTypes.THREAD, direction="f" + event_id, + event, + room_id, + RelationTypes.THREAD, + direction=Direction.FORWARDS, ) # Filter out ignored users. diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index be2242b6aca3..0018d6f7abcb 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -30,7 +30,7 @@ import attr -from synapse.api.constants import MAIN_TIMELINE, RelationTypes +from synapse.api.constants import MAIN_TIMELINE, Direction, RelationTypes from synapse.api.errors import SynapseError from synapse.events import EventBase from synapse.storage._base import SQLBaseStore @@ -168,7 +168,7 @@ async def get_relations_for_event( relation_type: Optional[str] = None, event_type: Optional[str] = None, limit: int = 5, - direction: str = "b", + direction: Direction = Direction.BACKWARDS, from_token: Optional[StreamToken] = None, to_token: Optional[StreamToken] = None, ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]: @@ -181,8 +181,8 @@ async def get_relations_for_event( relation_type: Only fetch events with this relation type, if given. event_type: Only fetch events with this event type, if given. limit: Only fetch the most recent `limit` events. - direction: Whether to fetch the most recent first (`"b"`) or the - oldest first (`"f"`). + direction: Whether to fetch the most recent first (backwards) or the + oldest first (forwards). from_token: Fetch rows from the given token, or from the start if None. to_token: Fetch rows up to the given token, or up to the end if None. diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 8977bf33e786..818c46182e01 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -55,6 +55,7 @@ from twisted.internet import defer +from synapse.api.constants import Direction from synapse.api.filtering import Filter from synapse.events import EventBase from synapse.logging.context import make_deferred_yieldable, run_in_background @@ -86,7 +87,6 @@ _STREAM_TOKEN = "stream" _TOPOLOGICAL_TOKEN = "topological" - # Used as return values for pagination APIs @attr.s(slots=True, frozen=True, auto_attribs=True) class _EventDictReturn: @@ -104,7 +104,7 @@ class _EventsAround: def generate_pagination_where_clause( - direction: str, + direction: Direction, column_names: Tuple[str, str], from_token: Optional[Tuple[Optional[int], int]], to_token: Optional[Tuple[Optional[int], int]], @@ -130,27 +130,26 @@ def generate_pagination_where_clause( token, but include those that match the to token. Args: - direction: Whether we're paginating backwards("b") or forwards ("f"). 
+ direction: Whether we're paginating backwards or forwards. column_names: The column names to bound. Must *not* be user defined as these get inserted directly into the SQL statement without escapes. from_token: The start point for the pagination. This is an exclusive - minimum bound if direction is "f", and an inclusive maximum bound if - direction is "b". + minimum bound if direction is forwards, and an inclusive maximum bound if + direction is backwards. to_token: The endpoint point for the pagination. This is an inclusive - maximum bound if direction is "f", and an exclusive minimum bound if - direction is "b". + maximum bound if direction is forwards, and an exclusive minimum bound if + direction is backwards. engine: The database engine to generate the clauses for Returns: The sql expression """ - assert direction in ("b", "f") where_clause = [] if from_token: where_clause.append( _make_generic_sql_bound( - bound=">=" if direction == "b" else "<", + bound=">=" if direction == Direction.BACKWARDS else "<", column_names=column_names, values=from_token, engine=engine, @@ -160,7 +159,7 @@ def generate_pagination_where_clause( if to_token: where_clause.append( _make_generic_sql_bound( - bound="<" if direction == "b" else ">=", + bound="<" if direction == Direction.BACKWARDS else ">=", column_names=column_names, values=to_token, engine=engine, @@ -171,7 +170,7 @@ def generate_pagination_where_clause( def generate_pagination_bounds( - direction: str, + direction: Direction, from_token: Optional[RoomStreamToken], to_token: Optional[RoomStreamToken], ) -> Tuple[ @@ -181,7 +180,7 @@ def generate_pagination_bounds( Generate a start and end point for this page of events. Args: - direction: Whether pagination is going forwards or backwards. One of "f" or "b". + direction: Whether pagination is going forwards or backwards. from_token: The token to start pagination at, or None to start at the first value. to_token: The token to end pagination at, or None to not limit the end point. @@ -201,7 +200,7 @@ def generate_pagination_bounds( # Tokens really represent positions between elements, but we use # the convention of pointing to the event before the gap. Hence # we have a bit of asymmetry when it comes to equalities. - if direction == "b": + if direction == Direction.BACKWARDS: order = "DESC" else: order = "ASC" @@ -215,7 +214,7 @@ def generate_pagination_bounds( if from_token: if from_token.topological is not None: from_bound = from_token.as_historical_tuple() - elif direction == "b": + elif direction == Direction.BACKWARDS: from_bound = ( None, from_token.get_max_stream_pos(), @@ -230,7 +229,7 @@ def generate_pagination_bounds( if to_token: if to_token.topological is not None: to_bound = to_token.as_historical_tuple() - elif direction == "b": + elif direction == Direction.BACKWARDS: to_bound = ( None, to_token.stream, @@ -245,20 +244,20 @@ def generate_pagination_bounds( def generate_next_token( - direction: str, last_topo_ordering: int, last_stream_ordering: int + direction: Direction, last_topo_ordering: int, last_stream_ordering: int ) -> RoomStreamToken: """ Generate the next room stream token based on the currently returned data. Args: - direction: Whether pagination is going forwards or backwards. One of "f" or "b". + direction: Whether pagination is going forwards or backwards. last_topo_ordering: The last topological ordering being returned. last_stream_ordering: The last stream ordering being returned. Returns: A new RoomStreamToken to return to the client. 
""" - if direction == "b": + if direction == Direction.BACKWARDS: # Tokens are positions between events. # This token points *after* the last event in the chunk. # We need it to point to the event before it in the chunk @@ -1201,7 +1200,7 @@ def _get_events_around_txn( txn, room_id, before_token, - direction="b", + direction=Direction.BACKWARDS, limit=before_limit, event_filter=event_filter, ) @@ -1211,7 +1210,7 @@ def _get_events_around_txn( txn, room_id, after_token, - direction="f", + direction=Direction.FORWARDS, limit=after_limit, event_filter=event_filter, ) @@ -1374,7 +1373,7 @@ def _paginate_room_events_txn( room_id: str, from_token: RoomStreamToken, to_token: Optional[RoomStreamToken] = None, - direction: str = "b", + direction: Direction = Direction.BACKWARDS, limit: int = -1, event_filter: Optional[Filter] = None, ) -> Tuple[List[_EventDictReturn], RoomStreamToken]: @@ -1385,8 +1384,8 @@ def _paginate_room_events_txn( room_id from_token: The token used to stream from to_token: A token which if given limits the results to only those before - direction: Either 'b' or 'f' to indicate whether we are paginating - forwards or backwards from `from_key`. + direction: Indicates whether we are paginating forwards or backwards + from `from_key`. limit: The maximum number of events to return. event_filter: If provided filters the events to those that match the filter. @@ -1489,8 +1488,12 @@ def _paginate_room_events_txn( _EventDictReturn(event_id, topological_ordering, stream_ordering) for event_id, instance_name, topological_ordering, stream_ordering in txn if _filter_results( - lower_token=to_token if direction == "b" else from_token, - upper_token=from_token if direction == "b" else to_token, + lower_token=to_token + if direction == Direction.BACKWARDS + else from_token, + upper_token=from_token + if direction == Direction.BACKWARDS + else to_token, instance_name=instance_name, topological_ordering=topological_ordering, stream_ordering=stream_ordering, @@ -1514,7 +1517,7 @@ async def paginate_room_events( room_id: str, from_key: RoomStreamToken, to_key: Optional[RoomStreamToken] = None, - direction: str = "b", + direction: Direction = Direction.BACKWARDS, limit: int = -1, event_filter: Optional[Filter] = None, ) -> Tuple[List[EventBase], RoomStreamToken]: @@ -1524,8 +1527,8 @@ async def paginate_room_events( room_id from_key: The token used to stream from to_key: A token which if given limits the results to only those before - direction: Either 'b' or 'f' to indicate whether we are paginating - forwards or backwards from `from_key`. + direction: Indicates whether we are paginating forwards or backwards + from `from_key`. limit: The maximum number of events to return. event_filter: If provided filters the events to those that match the filter. 
diff --git a/synapse/streams/config.py b/synapse/streams/config.py
index 6df2de919cda..5cb787518164 100644
--- a/synapse/streams/config.py
+++ b/synapse/streams/config.py
@@ -16,6 +16,7 @@
 import attr
 
+from synapse.api.constants import Direction
 from synapse.api.errors import SynapseError
 from synapse.http.servlet import parse_integer, parse_string
 from synapse.http.site import SynapseRequest
@@ -34,7 +35,7 @@ class PaginationConfig:
 
     from_token: Optional[StreamToken]
     to_token: Optional[StreamToken]
-    direction: str
+    direction: Direction
     limit: int
 
     @classmethod
@@ -45,9 +46,13 @@ async def from_request(
         default_limit: int,
         default_dir: str = "f",
     ) -> "PaginationConfig":
-        direction = parse_string(
-            request, "dir", default=default_dir, allowed_values=["f", "b"]
+        direction_str = parse_string(
+            request,
+            "dir",
+            default=default_dir,
+            allowed_values=[Direction.FORWARDS.value, Direction.BACKWARDS.value],
         )
+        direction = Direction(direction_str)
 
         from_tok_str = parse_string(request, "from")
         to_tok_str = parse_string(request, "to")

From fca5617a0dc281a414194333f1030f416d0287bd Mon Sep 17 00:00:00 2001
From: David Robertson
Date: Fri, 27 Jan 2023 15:05:29 +0000
Subject: [PATCH 082/344] Describe faster joins

---
 CHANGES.md | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/CHANGES.md b/CHANGES.md
index 0edf6c34c938..10be47158ec6 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -8,6 +8,23 @@ The upgrade from 1.75 to 1.76 changes the account data replication streams in a
 
 Those who are `poetry install`ing from source using our lockfile should ensure their poetry version is 1.3.2 or higher; [see upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#minimum-version-of-poetry-is-now-132).
 
+Notes on faster joins
+---------------------
+
+The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms.
+
+After a faster join, Synapse considers that room "partially joined". In this state, you should be able to
+
+- read incoming messages;
+- see incoming state changes, e.g. room topic changes; and
+- send messages, if the room is unencrypted.
+
+Synapse has to spend more effort to complete the join in the background. Once this finishes, you will be able to
+
+- send messages, if the room is encrypted;
+- retrieve room history from before your join, if permitted by the room settings; and
+- access the full list of room members.
+
 Bugfixes
 --------
 

From 2a51f3ec36abeb1f5c1db795541988d1d9698e41 Mon Sep 17 00:00:00 2001
From: Patrick Cloke
Date: Fri, 27 Jan 2023 10:16:21 -0500
Subject: [PATCH 083/344] Implement MSC3952: Intentional mentions (#14823)

MSC3952 defines push rules which search for mentions in a list of Matrix IDs
in the event body, instead of searching the entire event body for display
name / local part.

This is implemented behind an experimental configuration flag and does not
yet implement the backwards compatibility pieces of the MSC.
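To make the new behaviour concrete before the diff itself: under MSC3952 a client lists the mentioned users explicitly in the event content, and the push-rule evaluator consults that list instead of scanning the message body. The sketch below mirrors the sanitisation logic added to `bulk_push_rule_evaluator.py` in this patch; the field names (`org.matrix.msc3952.mentions`, `user_ids`, `room`) are taken from the patch, but the helper itself is illustrative rather than part of Synapse.

```python
from typing import Set, Tuple

# EventContentFields.MSC3952_MENTIONS in the patch below.
MENTIONS_FIELD = "org.matrix.msc3952.mentions"

# Example event content a client might send.
event_content = {
    "msgtype": "m.text",
    "body": "hello alice",
    MENTIONS_FIELD: {"user_ids": ["@alice:example.org"], "room": False},
}


def extract_mentions(content: dict) -> Tuple[Set[str], bool]:
    """Pull out user/room mentions, ignoring malformed values."""
    mentions = content.get(MENTIONS_FIELD)
    user_mentions: Set[str] = set()
    room_mention = False
    if isinstance(mentions, dict):
        user_ids = mentions.get("user_ids")
        if isinstance(user_ids, list):
            # Drop any non-string entries, as the patch does.
            user_mentions = {item for item in user_ids if isinstance(item, str)}
        # A room mention only counts if the value is exactly True.
        room_mention = mentions.get("room") is True
    return user_mentions, room_mention


assert extract_mentions(event_content) == ({"@alice:example.org"}, False)
```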
--- changelog.d/14823.feature | 1 + rust/src/push/base_rules.rs | 21 +++++ rust/src/push/evaluator.rs | 25 +++++- rust/src/push/mod.rs | 34 ++++++++ stubs/synapse/synapse_rust/push.pyi | 5 +- synapse/api/constants.py | 3 + synapse/config/experimental.py | 5 ++ synapse/push/bulk_push_rule_evaluator.py | 25 +++++- synapse/storage/databases/main/push_rule.py | 1 + tests/push/test_bulk_push_rule_evaluator.py | 88 +++++++++++++++++++++ tests/push/test_push_rule_evaluator.py | 66 ++++++++++++++-- 11 files changed, 263 insertions(+), 11 deletions(-) create mode 100644 changelog.d/14823.feature diff --git a/changelog.d/14823.feature b/changelog.d/14823.feature new file mode 100644 index 000000000000..8293e99effbc --- /dev/null +++ b/changelog.d/14823.feature @@ -0,0 +1 @@ +Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 9140a69bb654..880eed0ef405 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -131,6 +131,14 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default: true, default_enabled: true, }, + PushRule { + rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mentioned"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::IsUserMention)]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), + default: true, + default_enabled: true, + }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"), priority_class: 5, @@ -139,6 +147,19 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default: true, default_enabled: true, }, + PushRule { + rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mentioned"), + priority_class: 5, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::IsRoomMention), + Condition::Known(KnownCondition::SenderNotificationPermission { + key: Cow::Borrowed("room"), + }), + ]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), + default: true, + default_enabled: true, + }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.roomnotif"), priority_class: 5, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 0242ee1c5ffc..aa71202e4379 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use anyhow::{Context, Error}; use lazy_static::lazy_static; @@ -68,6 +68,11 @@ pub struct PushRuleEvaluator { /// The "content.body", if any. body: String, + /// The user mentions that were part of the message. + user_mentions: BTreeSet, + /// True if the message is a room message. + room_mention: bool, + /// The number of users in the room. room_member_count: u64, @@ -100,6 +105,8 @@ impl PushRuleEvaluator { #[new] pub fn py_new( flattened_keys: BTreeMap, + user_mentions: BTreeSet, + room_mention: bool, room_member_count: u64, sender_power_level: Option, notification_power_levels: BTreeMap, @@ -116,6 +123,8 @@ impl PushRuleEvaluator { Ok(PushRuleEvaluator { flattened_keys, body, + user_mentions, + room_mention, room_member_count, notification_power_levels, sender_power_level, @@ -229,6 +238,14 @@ impl PushRuleEvaluator { KnownCondition::RelatedEventMatch(event_match) => { self.match_related_event_match(event_match, user_id)? 
} + KnownCondition::IsUserMention => { + if let Some(uid) = user_id { + self.user_mentions.contains(uid) + } else { + false + } + } + KnownCondition::IsRoomMention => self.room_mention, KnownCondition::ContainsDisplayName => { if let Some(dn) = display_name { if !dn.is_empty() { @@ -424,6 +441,8 @@ fn push_rule_evaluator() { flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); let evaluator = PushRuleEvaluator::py_new( flattened_keys, + BTreeSet::new(), + false, 10, Some(0), BTreeMap::new(), @@ -449,6 +468,8 @@ fn test_requires_room_version_supports_condition() { let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; let evaluator = PushRuleEvaluator::py_new( flattened_keys, + BTreeSet::new(), + false, 10, Some(0), BTreeMap::new(), @@ -483,7 +504,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 842b13c88b2c..7e449f243371 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -269,6 +269,10 @@ pub enum KnownCondition { EventMatch(EventMatchCondition), #[serde(rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatch(RelatedEventMatchCondition), + #[serde(rename = "org.matrix.msc3952.is_user_mention")] + IsUserMention, + #[serde(rename = "org.matrix.msc3952.is_room_mention")] + IsRoomMention, ContainsDisplayName, RoomMemberCount { #[serde(skip_serializing_if = "Option::is_none")] @@ -414,6 +418,7 @@ pub struct FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, + msc3952_intentional_mentions: bool, } #[pymethods] @@ -425,6 +430,7 @@ impl FilteredPushRules { msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, + msc3952_intentional_mentions: bool, ) -> Self { Self { push_rules, @@ -432,6 +438,7 @@ impl FilteredPushRules { msc1767_enabled, msc3381_polls_enabled, msc3664_enabled, + msc3952_intentional_mentions, } } @@ -465,6 +472,11 @@ impl FilteredPushRules { return false; } + if !self.msc3952_intentional_mentions && rule.rule_id.contains("org.matrix.msc3952") + { + return false; + } + true }) .map(|r| { @@ -522,6 +534,28 @@ fn test_deserialize_unstable_msc3931_condition() { )); } +#[test] +fn test_deserialize_unstable_msc3952_user_condition() { + let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::IsUserMention) + )); +} + +#[test] +fn test_deserialize_unstable_msc3952_room_condition() { + let json = r#"{"kind":"org.matrix.msc3952.is_room_mention"}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::IsRoomMention) + )); +} + #[test] fn test_deserialize_custom_condition() { let json = r#"{"kind":"custom_tag"}"#; diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 304ed7111c20..588d90c25a82 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union +from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union from synapse.types import JsonDict @@ -46,6 +46,7 @@ class FilteredPushRules: msc1767_enabled: bool, msc3381_polls_enabled: bool, msc3664_enabled: bool, + msc3952_intentional_mentions: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... @@ -55,6 +56,8 @@ class PushRuleEvaluator: def __init__( self, flattened_keys: Mapping[str, str], + user_mentions: Set[str], + room_mention: bool, room_member_count: int, sender_power_level: Optional[int], notification_power_levels: Mapping[str, int], diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 6f9239d21c9b..0f224b34cd95 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -233,6 +233,9 @@ class EventContentFields: # The authorising user for joining a restricted room. AUTHORISING_USER: Final = "join_authorised_via_users_server" + # Use for mentioning users. + MSC3952_MENTIONS: Final = "org.matrix.msc3952.mentions" + # an unspecced field added to to-device messages to identify them uniquely-ish TO_DEVICE_MSGID: Final = "org.matrix.msgid" diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 2590c88cdeb1..d2d0270dddb1 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -168,3 +168,8 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3925: do not replace events with their edits self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False) + + # MSC3952: Intentional mentions + self.msc3952_intentional_mentions = experimental.get( + "msc3952_intentional_mentions", False + ) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index f27ba64d5365..deaec1956472 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -22,13 +22,20 @@ List, Mapping, Optional, + Set, Tuple, Union, ) from prometheus_client import Counter -from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes +from synapse.api.constants import ( + MAIN_TIMELINE, + EventContentFields, + EventTypes, + Membership, + RelationTypes, +) from synapse.api.room_versions import PushRuleRoomFlag, RoomVersion from synapse.event_auth import auth_types_for_event, get_user_power_level from synapse.events import EventBase, relation_from_event @@ -342,8 +349,24 @@ async def _action_for_event_by_user( for user_id, level in notification_levels.items(): notification_levels[user_id] = int(level) + # Pull out any user and room mentions. + mentions = event.content.get(EventContentFields.MSC3952_MENTIONS) + user_mentions: Set[str] = set() + room_mention = False + if isinstance(mentions, dict): + # Remove out any non-string items and convert to a set. + user_mentions_raw = mentions.get("user_ids") + if isinstance(user_mentions_raw, list): + user_mentions = set( + filter(lambda item: isinstance(item, str), user_mentions_raw) + ) + # Room mention is only true if the value is exactly true. 
+ room_mention = mentions.get("room") is True + evaluator = PushRuleEvaluator( _flatten_dict(event, room_version=event.room_version), + user_mentions, + room_mention, room_member_count, sender_power_level, notification_levels, diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 14ca167b34b4..466a1145b78c 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -89,6 +89,7 @@ def _load_rules( msc1767_enabled=experimental_config.msc1767_enabled, msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, + msc3952_intentional_mentions=experimental_config.msc3952_intentional_mentions, ) return filtered_rules diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 9c17a42b650f..aba62b5dc8fe 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any from unittest.mock import patch from twisted.test.proto_helpers import MemoryReactor +from synapse.api.constants import EventContentFields from synapse.api.room_versions import RoomVersions from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator from synapse.rest import admin @@ -126,3 +128,89 @@ def test_action_for_event_by_user_disabled_by_config(self) -> None: # Ensure no actions are generated! self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) bulk_evaluator._action_for_event_by_user.assert_not_called() + + @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) + def test_mentions(self) -> None: + """Test the behavior of an event which includes invalid mentions.""" + bulk_evaluator = BulkPushRuleEvaluator(self.hs) + + sentinel = object() + + def create_and_process(mentions: Any = sentinel) -> bool: + """Returns true iff the `mentions` trigger an event push action.""" + content = {} + if mentions is not sentinel: + content[EventContentFields.MSC3952_MENTIONS] = mentions + + # Create a new message event which should cause a notification. + event, context = self.get_success( + self.event_creation_handler.create_event( + self.requester, + { + "type": "test", + "room_id": self.room_id, + "content": content, + "sender": f"@bob:{self.hs.hostname}", + }, + ) + ) + + # Ensure no actions are generated! + self.get_success( + bulk_evaluator.action_for_events_by_user([(event, context)]) + ) + + # If any actions are generated for this event, return true. + result = self.get_success( + self.hs.get_datastores().main.db_pool.simple_select_list( + table="event_push_actions_staging", + keyvalues={"event_id": event.event_id}, + retcols=("*",), + desc="get_event_push_actions_staging", + ) + ) + return len(result) > 0 + + # Not including the mentions field should not notify. + self.assertFalse(create_and_process()) + # An empty mentions field should not notify. + self.assertFalse(create_and_process({})) + + # Non-dict mentions should be ignored. + mentions: Any + for mentions in (None, True, False, 1, "foo", []): + self.assertFalse(create_and_process(mentions)) + + # A non-list should be ignored. + for mentions in (None, True, False, 1, "foo", {}): + self.assertFalse(create_and_process({"user_ids": mentions})) + + # The Matrix ID appearing anywhere in the list should notify. 
+ self.assertTrue(create_and_process({"user_ids": [self.alice]})) + self.assertTrue(create_and_process({"user_ids": ["@another:test", self.alice]})) + + # Duplicate user IDs should notify. + self.assertTrue(create_and_process({"user_ids": [self.alice, self.alice]})) + + # Invalid entries in the list are ignored. + self.assertFalse(create_and_process({"user_ids": [None, True, False, {}, []]})) + self.assertTrue( + create_and_process({"user_ids": [None, True, False, {}, [], self.alice]}) + ) + + # Room mentions from those without power should not notify. + self.assertFalse(create_and_process({"room": True})) + + # Room mentions from those with power should notify. + self.helper.send_state( + self.room_id, + "m.room.power_levels", + {"notifications": {"room": 0}}, + self.token, + state_key="", + ) + self.assertTrue(create_and_process({"room": True})) + + # Invalid data should not notify. + for mentions in (None, False, 1, "foo", [], {}): + self.assertFalse(create_and_process({"room": mentions})) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 1b87756b751a..9d01c989d408 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Optional, Union, cast +from typing import Dict, List, Optional, Set, Union, cast import frozendict @@ -39,7 +39,12 @@ class PushRuleEvaluatorTestCase(unittest.TestCase): def _get_evaluator( - self, content: JsonMapping, related_events: Optional[JsonDict] = None + self, + content: JsonMapping, + *, + user_mentions: Optional[Set[str]] = None, + room_mention: bool = False, + related_events: Optional[JsonDict] = None, ) -> PushRuleEvaluator: event = FrozenEvent( { @@ -57,13 +62,15 @@ def _get_evaluator( power_levels: Dict[str, Union[int, Dict[str, int]]] = {} return PushRuleEvaluator( _flatten_dict(event), + user_mentions or set(), + room_mention, room_member_count, sender_power_level, cast(Dict[str, int], power_levels.get("notifications", {})), {} if related_events is None else related_events, - True, - event.room_version.msc3931_push_features, - True, + related_event_match_enabled=True, + room_version_feature_flags=event.room_version.msc3931_push_features, + msc3931_enabled=True, ) def test_display_name(self) -> None: @@ -90,6 +97,51 @@ def test_display_name(self) -> None: # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) + def test_user_mentions(self) -> None: + """Check for user mentions.""" + condition = {"kind": "org.matrix.msc3952.is_user_mention"} + + # No mentions shouldn't match. 
+ evaluator = self._get_evaluator({}) + self.assertFalse(evaluator.matches(condition, "@user:test", None)) + + # An empty set shouldn't match + evaluator = self._get_evaluator({}, user_mentions=set()) + self.assertFalse(evaluator.matches(condition, "@user:test", None)) + + # The Matrix ID appearing anywhere in the mentions list should match + evaluator = self._get_evaluator({}, user_mentions={"@user:test"}) + self.assertTrue(evaluator.matches(condition, "@user:test", None)) + + evaluator = self._get_evaluator( + {}, user_mentions={"@another:test", "@user:test"} + ) + self.assertTrue(evaluator.matches(condition, "@user:test", None)) + + # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions + # since the BulkPushRuleEvaluator is what handles data sanitisation. + + def test_room_mentions(self) -> None: + """Check for room mentions.""" + condition = {"kind": "org.matrix.msc3952.is_room_mention"} + + # No room mention shouldn't match. + evaluator = self._get_evaluator({}) + self.assertFalse(evaluator.matches(condition, None, None)) + + # Room mention should match. + evaluator = self._get_evaluator({}, room_mention=True) + self.assertTrue(evaluator.matches(condition, None, None)) + + # A room mention and user mention is valid. + evaluator = self._get_evaluator( + {}, user_mentions={"@another:test"}, room_mention=True + ) + self.assertTrue(evaluator.matches(condition, None, None)) + + # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions + # since the BulkPushRuleEvaluator is what handles data sanitisation. + def _assert_matches( self, condition: JsonDict, content: JsonMapping, msg: Optional[str] = None ) -> None: @@ -308,7 +360,7 @@ def test_related_event_match(self) -> None: }, } }, - { + related_events={ "m.in_reply_to": { "event_id": "$parent_event_id", "type": "m.room.message", @@ -408,7 +460,7 @@ def test_related_event_match_with_fallback(self) -> None: }, } }, - { + related_events={ "m.in_reply_to": { "event_id": "$parent_event_id", "type": "m.room.message", From 2b27a33bb693b63c663ecf2aa68e5a62cbfe122c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 10:31:05 +0000 Subject: [PATCH 084/344] Bump ijson from 3.1.4 to 3.2.0.post0 (#14935) * Bump ijson from 3.1.4 to 3.2.0.post0 Bumps [ijson](https://github.com/ICRAR/ijson) from 3.1.4 to 3.2.0.post0. - [Release notes](https://github.com/ICRAR/ijson/releases) - [Changelog](https://github.com/ICRAR/ijson/blob/master/CHANGELOG.md) - [Commits](https://github.com/ICRAR/ijson/compare/v3.1.4...v3.2.0.post0) --- updated-dependencies: - dependency-name: ijson dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14935.misc | 1 + poetry.lock | 142 +++++++++++++++++++++++------------------ 2 files changed, 80 insertions(+), 63 deletions(-) create mode 100644 changelog.d/14935.misc diff --git a/changelog.d/14935.misc b/changelog.d/14935.misc new file mode 100644 index 000000000000..0ad74b90eb73 --- /dev/null +++ b/changelog.d/14935.misc @@ -0,0 +1 @@ +Bump ijson from 3.1.4 to 3.2.0.post0. 
diff --git a/poetry.lock b/poetry.lock index 17a6645b5588..b3a49c1d89a8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -579,74 +579,90 @@ files = [ [[package]] name = "ijson" -version = "3.1.4" +version = "3.2.0.post0" description = "Iterative JSON parser with standard Python iterator interfaces" category = "main" optional = false python-versions = "*" files = [ - {file = "ijson-3.1.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:6c1a777096be5f75ffebb335c6d2ebc0e489b231496b7f2ca903aa061fe7d381"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:475fc25c3d2a86230b85777cae9580398b42eed422506bf0b6aacfa936f7bfcd"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:f587699b5a759e30accf733e37950cc06c4118b72e3e146edcea77dded467426"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:339b2b4c7bbd64849dd69ef94ee21e29dcd92c831f47a281fdd48122bb2a715a"}, - {file = "ijson-3.1.4-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:446ef8980504da0af8d20d3cb6452c4dc3d8aa5fd788098985e899b913191fe6"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:3997a2fdb28bc04b9ab0555db5f3b33ed28d91e9d42a3bf2c1842d4990beb158"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:fa10a1d88473303ec97aae23169d77c5b92657b7fb189f9c584974c00a79f383"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:9a5bf5b9d8f2ceaca131ee21fc7875d0f34b95762f4f32e4d65109ca46472147"}, - {file = "ijson-3.1.4-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:81cc8cee590c8a70cca3c9aefae06dd7cb8e9f75f3a7dc12b340c2e332d33a2a"}, - {file = "ijson-3.1.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4ea5fc50ba158f72943d5174fbc29ebefe72a2adac051c814c87438dc475cf78"}, - {file = "ijson-3.1.4-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:3b98861a4280cf09d267986cefa46c3bd80af887eae02aba07488d80eb798afa"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:068c692efba9692406b86736dcc6803e4a0b6280d7f0b7534bff3faec677ff38"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:86884ac06ac69cea6d89ab7b84683b3b4159c4013e4a20276d3fc630fe9b7588"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:41e5886ff6fade26f10b87edad723d2db14dcbb1178717790993fcbbb8ccd333"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:24b58933bf777d03dc1caa3006112ec7f9e6f6db6ffe1f5f5bd233cb1281f719"}, - {file = "ijson-3.1.4-cp35-cp35m-manylinux2014_aarch64.whl", hash = "sha256:13f80aad0b84d100fb6a88ced24bade21dc6ddeaf2bba3294b58728463194f50"}, - {file = "ijson-3.1.4-cp35-cp35m-win32.whl", hash = "sha256:fa9a25d0bd32f9515e18a3611690f1de12cb7d1320bd93e9da835936b41ad3ff"}, - {file = "ijson-3.1.4-cp35-cp35m-win_amd64.whl", hash = "sha256:c4c1bf98aaab4c8f60d238edf9bcd07c896cfcc51c2ca84d03da22aad88957c5"}, - {file = "ijson-3.1.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f0f2a87c423e8767368aa055310024fa28727f4454463714fef22230c9717f64"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:15507de59d74d21501b2a076d9c49abf927eb58a51a01b8f28a0a0565db0a99f"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:2e6bd6ad95ab40c858592b905e2bbb4fe79bbff415b69a4923dafe841ffadcb4"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:68e295bb12610d086990cedc89fb8b59b7c85740d66e9515aed062649605d0bf"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2010_x86_64.whl", hash 
= "sha256:3bb461352c0f0f2ec460a4b19400a665b8a5a3a2da663a32093df1699642ee3f"}, - {file = "ijson-3.1.4-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f91c75edd6cf1a66f02425bafc59a22ec29bc0adcbc06f4bfd694d92f424ceb3"}, - {file = "ijson-3.1.4-cp36-cp36m-win32.whl", hash = "sha256:4c53cc72f79a4c32d5fc22efb85aa22f248e8f4f992707a84bdc896cc0b1ecf9"}, - {file = "ijson-3.1.4-cp36-cp36m-win_amd64.whl", hash = "sha256:ac9098470c1ff6e5c23ec0946818bc102bfeeeea474554c8d081dc934be20988"}, - {file = "ijson-3.1.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dcd6f04df44b1945b859318010234651317db2c4232f75e3933f8bb41c4fa055"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:5a2f40c053c837591636dc1afb79d85e90b9a9d65f3d9963aae31d1eb11bfed2"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f50337e3b8e72ec68441b573c2848f108a8976a57465c859b227ebd2a2342901"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:454918f908abbed3c50a0a05c14b20658ab711b155e4f890900e6f60746dd7cc"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:387c2ec434cc1bc7dc9bd33ec0b70d95d443cc1e5934005f26addc2284a437ab"}, - {file = "ijson-3.1.4-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:179ed6fd42e121d252b43a18833df2de08378fac7bce380974ef6f5e522afefa"}, - {file = "ijson-3.1.4-cp37-cp37m-win32.whl", hash = "sha256:26a6a550b270df04e3f442e2bf0870c9362db4912f0e7bdfd300f30ea43115a2"}, - {file = "ijson-3.1.4-cp37-cp37m-win_amd64.whl", hash = "sha256:ff8cf7507d9d8939264068c2cff0a23f99703fa2f31eb3cb45a9a52798843586"}, - {file = "ijson-3.1.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:09c9d7913c88a6059cd054ff854958f34d757402b639cf212ffbec201a705a0d"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux1_i686.whl", hash = "sha256:702ba9a732116d659a5e950ee176be6a2e075998ef1bcde11cbf79a77ed0f717"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:667841591521158770adc90793c2bdbb47c94fe28888cb802104b8bbd61f3d51"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:df641dd07b38c63eecd4f454db7b27aa5201193df160f06b48111ba97ab62504"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:9348e7d507eb40b52b12eecff3d50934fcc3d2a15a2f54ec1127a36063b9ba8f"}, - {file = "ijson-3.1.4-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:93455902fdc33ba9485c7fae63ac95d96e0ab8942224a357113174bbeaff92e9"}, - {file = "ijson-3.1.4-cp38-cp38-win32.whl", hash = "sha256:5b725f2e984ce70d464b195f206fa44bebbd744da24139b61fec72de77c03a16"}, - {file = "ijson-3.1.4-cp38-cp38-win_amd64.whl", hash = "sha256:a5965c315fbb2dc9769dfdf046eb07daf48ae20b637da95ec8d62b629be09df4"}, - {file = "ijson-3.1.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b8ee7dbb07cec9ba29d60cfe4954b3cc70adb5f85bba1f72225364b59c1cf82b"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d9e01c55d501e9c3d686b6ee3af351c9c0c8c3e45c5576bd5601bee3e1300b09"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:297f26f27a04cd0d0a2f865d154090c48ea11b239cabe0a17a6c65f0314bd1ca"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:9239973100338a4138d09d7a4602bd289861e553d597cd67390c33bfc452253e"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:2a64c66a08f56ed45a805691c2fd2e1caef00edd6ccf4c4e5eff02cd94ad8364"}, - {file = "ijson-3.1.4-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d17fd199f0d0a4ab6e0d541b4eec1b68b5bd5bb5d8104521e22243015b51049b"}, 
- {file = "ijson-3.1.4-cp39-cp39-win32.whl", hash = "sha256:70ee3c8fa0eba18c80c5911639c01a8de4089a4361bad2862a9949e25ec9b1c8"}, - {file = "ijson-3.1.4-cp39-cp39-win_amd64.whl", hash = "sha256:6bf2b64304321705d03fa5e403ec3f36fa5bb27bf661849ad62e0a3a49bc23e3"}, - {file = "ijson-3.1.4-pp27-pypy_73-macosx_10_9_x86_64.whl", hash = "sha256:5d7e3fcc3b6de76a9dba1e9fc6ca23dad18f0fa6b4e6499415e16b684b2e9af1"}, - {file = "ijson-3.1.4-pp27-pypy_73-manylinux1_x86_64.whl", hash = "sha256:a72eb0359ebff94754f7a2f00a6efe4c57716f860fc040c606dedcb40f49f233"}, - {file = "ijson-3.1.4-pp27-pypy_73-manylinux2010_x86_64.whl", hash = "sha256:28fc168f5faf5759fdfa2a63f85f1f7a148bbae98f34404a6ba19f3d08e89e87"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2844d4a38d27583897ed73f7946e205b16926b4cab2525d1ce17e8b08064c706"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:252defd1f139b5fb8c764d78d5e3a6df81543d9878c58992a89b261369ea97a7"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:15d5356b4d090c699f382c8eb6a2bcd5992a8c8e8b88c88bc6e54f686018328a"}, - {file = "ijson-3.1.4-pp36-pypy36_pp73-win32.whl", hash = "sha256:6774ec0a39647eea70d35fb76accabe3d71002a8701c0545b9120230c182b75b"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f11da15ec04cc83ff0f817a65a3392e169be8d111ba81f24d6e09236597bb28c"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:ee13ceeed9b6cf81b3b8197ef15595fc43fd54276842ed63840ddd49db0603da"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:97e4df67235fae40d6195711223520d2c5bf1f7f5087c2963fcde44d72ebf448"}, - {file = "ijson-3.1.4-pp37-pypy37_pp73-win32.whl", hash = "sha256:3d10eee52428f43f7da28763bb79f3d90bbbeea1accb15de01e40a00885b6e89"}, - {file = "ijson-3.1.4.tar.gz", hash = "sha256:1d1003ae3c6115ec9b587d29dd136860a81a23c7626b682e2b5b12c9fd30e4ea"}, + {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:5809752045ef74c26adf159ed03df7fb7e7a8d656992fd7562663ed47d6d39d9"}, + {file = "ijson-3.2.0.post0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ce4be2beece2629bd24bcab147741d1532bd5ed40fb52f2b4fcde5c5bf606df0"}, + {file = "ijson-3.2.0.post0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5d365df54d18076f1d5f2ffb1eef2ac7f0d067789838f13d393b5586fbb77b02"}, + {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c93ae4d49d8cf8accfedc8a8e7815851f56ceb6e399b0c186754a68fed22844"}, + {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:47a56e3628c227081a2aa58569cbf2af378bad8af648aa904080e87cd6644cfb"}, + {file = "ijson-3.2.0.post0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8af68fe579f6f0b9a8b3f033d10caacfed6a4b89b8c7a1d9478a8f5d8aba4a1"}, + {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6eed1ddd3147de49226db4f213851cf7860493a7b6c7bd5e62516941c007094c"}, + {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:9ecbf85a6d73fc72f6534c38f7d92ed15d212e29e0dbe9810a465d61c8a66d23"}, + {file = "ijson-3.2.0.post0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd218b338ac68213c997d4c88437c0e726f16d301616bf837e1468901934042c"}, + {file = "ijson-3.2.0.post0-cp310-cp310-win32.whl", hash = "sha256:4e7c4fdc7d24747c8cc7d528c145afda4de23210bf4054bd98cd63bf07e4882d"}, + {file = 
"ijson-3.2.0.post0-cp310-cp310-win_amd64.whl", hash = "sha256:4d4e143908f47307042c9678803d27706e0e2099d0a6c1988c6cae1da07760bf"}, + {file = "ijson-3.2.0.post0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:56500dac8f52989ef7c0075257a8b471cbea8ef77f1044822742b3cbf2246e8b"}, + {file = "ijson-3.2.0.post0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:535665a77408b6bea56eb828806fae125846dff2e2e0ed4cb2e0a8e36244d753"}, + {file = "ijson-3.2.0.post0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a4465c90b25ca7903410fabe4145e7b45493295cc3b84ec1216653fbe9021276"}, + {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efee1e9b4f691e1086730f3010e31c55625bc2e0f7db292a38a2cdf2774c2e13"}, + {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fd55f7a46429de95383fc0d0158c1bfb798e976d59d52830337343c2d9bda5c"}, + {file = "ijson-3.2.0.post0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25919b444426f58dcc62f763d1c6be6297f309da85ecab55f51da6ca86fc9fdf"}, + {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c85892d68895ba7a0b16a0e6b7d9f9a0e30e86f2b1e0f6986243473ba8735432"}, + {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:27409ba44cfd006901971063d37699f72e092b5efaa1586288b5067d80c6b5bd"}, + {file = "ijson-3.2.0.post0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:11dfd64633fe1382c4237477ac3836f682ca17e25e0d0799e84737795b0611df"}, + {file = "ijson-3.2.0.post0-cp311-cp311-win32.whl", hash = "sha256:41e955e173f77f54337fecaaa58a35c464b75e232b1f939b282497134a4d4f0e"}, + {file = "ijson-3.2.0.post0-cp311-cp311-win_amd64.whl", hash = "sha256:b3bdd2e12d9b9a18713dd6f3c5ef3734fdab25b79b177054ba9e35ecc746cb6e"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:26b57838e712b8852c40ec6d74c6de8bb226446440e1af1354c077a6f81b9142"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6464242f7895268d3086d7829ef031b05c77870dad1e13e51ef79d0a9cfe029"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3c6cf18b61b94db9590f86af0dd60edbccb36e151643152b8688066f677fbc9"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:992e9e68003df32e2aa0f31eb82c0a94f21286203ab2f2b2c666410e17b59d2f"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:d3e255ef05b434f20fc9d4b18ea15733d1038bec3e4960d772b06216fa79e82d"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:424232c2bf3e8181f1b572db92c179c2376b57eba9fc8931453fba975f48cb80"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:bced6cd5b09d4d002dda9f37292dd58d26eb1c4d0d179b820d3708d776300bb4"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-win32.whl", hash = "sha256:a8c84dff2d60ae06d5280ec87cd63050bbd74a90c02bfc7c390c803cfc8ac8fc"}, + {file = "ijson-3.2.0.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:a340413a9bf307fafd99254a4dd4ac6c567b91a205bf896dde18888315fd7fcd"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b3456cd5b16ec9db3ef23dd27f37bf5a14f765e8272e9af3e3de9ee9a4cba867"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eb838b4e4360e65c00aa13c78b35afc2477759d423b602b60335af5bed3de5b"}, + {file = 
"ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe7f414edd69dd9199b0dfffa0ada22f23d8009e10fe2a719e0993b7dcc2e6e2"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:183841b8d033ca95457f61fb0719185dc7f51a616070bdf1dcaf03473bed05b2"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1302dc6490da7d44c3a76a5f0b87d8bec9f918454c6d6e6bf4ed922e47da58bb"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3b21b1ecd20ed2f918f6f99cdfa68284a416c0f015ffa64b68fa933df1b24d40"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:e97e6e07851cefe7baa41f1ebf5c0899d2d00d94bfef59825752e4c784bebbe8"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-win32.whl", hash = "sha256:cd0450e76b9c629b7f86e7d5b91b7cc9c281dd719630160a992b19a856f7bdbd"}, + {file = "ijson-3.2.0.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:bed8dcb7dbfdb98e647ad47676045e0891f610d38095dcfdae468e1e1efb2766"}, + {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a7698bc480df76073067017f73ba4139dbaae20f7a6c9a0c7855b9c5e9a62124"}, + {file = "ijson-3.2.0.post0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2f204f6d4cedeb28326c230a0b046968b5263c234c65a5b18cee22865800fff7"}, + {file = "ijson-3.2.0.post0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9829a17f6f78d7f4d0aeff28c126926a1e5f86828ebb60d6a0acfa0d08457f9f"}, + {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f470f3d750e00df86e03254fdcb422d2f726f4fb3a0d8eeee35e81343985e58a"}, + {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb167ee21d9c413d6b0ab65ec12f3e7ea0122879da8b3569fa1063526f9f03a8"}, + {file = "ijson-3.2.0.post0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84eed88177f6c243c52b280cb094f751de600d98d2221e0dec331920894889ec"}, + {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:53f1a13eb99ab514c562869513172135d4b55a914b344e6518ba09ad3ef1e503"}, + {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:f6785ba0f65eb64b1ce3b7fcfec101085faf98f4e77b234f14287fd4138ffb25"}, + {file = "ijson-3.2.0.post0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:79b94662c2e9d366ab362c2c5858097eae0da100dea0dfd340db09ab28c8d5e8"}, + {file = "ijson-3.2.0.post0-cp38-cp38-win32.whl", hash = "sha256:5242cb2313ba3ece307b426efa56424ac13cc291c36f292b501d412a98ad0703"}, + {file = "ijson-3.2.0.post0-cp38-cp38-win_amd64.whl", hash = "sha256:775444a3b647350158d0b3c6c39c88b4a0995643a076cb104bf25042c9aedcf8"}, + {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:1d64ffaab1d006a4fa9584a4c723e95cc9609bf6c3365478e250cd0bffaaadf3"}, + {file = "ijson-3.2.0.post0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:434e57e7ec5c334ccb0e67bb4d9e60c264dcb2a3843713dbeb12cb19fe42a668"}, + {file = "ijson-3.2.0.post0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:158494bfe89ccb32618d0e53b471364080ceb975462ec464d9f9f37d9832b653"}, + {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f20072376e338af0e51ccecb02335b4e242d55a9218a640f545be7fc64cca99"}, + {file = "ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3e8d46c1004afcf2bf513a8fb575ee2ec3d8009a2668566b5926a2dcf7f1a45"}, + {file = 
"ijson-3.2.0.post0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:986a0347fe19e5117a5241276b72add570839e5bcdc7a6dac4b538c5928eeff5"}, + {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:535a59d61b9aef6fc2a3d01564c1151e38e5a44b92cd6583cb4e8ccf0f58043f"}, + {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:830de03f391f7e72b8587bb178c22d534da31153e9ee4234d54ef82cde5ace5e"}, + {file = "ijson-3.2.0.post0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6def9ac8d73b76cb02e9e9837763f27f71e5e67ec0afae5f1f4cf8f61c39b1ac"}, + {file = "ijson-3.2.0.post0-cp39-cp39-win32.whl", hash = "sha256:11bb84a53c37e227e733c6dffad2037391cf0b3474bff78596dc4373b02008a0"}, + {file = "ijson-3.2.0.post0-cp39-cp39-win_amd64.whl", hash = "sha256:f349bee14d0a4a72ba41e1b1cce52af324ebf704f5066c09e3dd04cfa6f545f0"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5418066666b25b05f2b8ae2698408daa0afa68f07b0b217f2ab24465b7e9cbd9"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ccc4d4b947549f9c431651c02b95ef571412c78f88ded198612a41d5c5701a0"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dcec67fc15e5978ad286e8cc2a3f9347076e28e0e01673b5ace18c73da64e3ff"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee9537e8a8aa15dd2d0912737aeb6265e781e74f7f7cad8165048fcb5f39230"}, + {file = "ijson-3.2.0.post0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:03dfd4c8ed19e704d04b0ad4f34f598dc569fd3f73089f80eed698e7f6069233"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2d50b2ad9c6c51ca160aa60de7f4dacd1357c38d0e503f51aed95c1c1945ff53"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51c1db80d7791fb761ad9a6c70f521acd2c4b0e5afa2fe0d813beb2140d16c37"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:13f2939db983327dd0492f6c1c0e77be3f2cbf9b620c92c7547d1d2cd6ef0486"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f9d449f86f8971c24609e319811f7f3b6b734f0218c4a0e799debe19300d15b"}, + {file = "ijson-3.2.0.post0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7e0d1713a9074a7677eb8e43f424b731589d1c689d4676e2f57a5ce59d089e89"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c8646eb81eec559d7d8b1e51a5087299d06ecab3bc7da54c01f7df94350df135"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fe3a53e00c59de33b825ba8d6d39f544a7d7180983cd3d6bd2c3794ae35442"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93aaec00cbde65c192f15c21f3ee44d2ab0c11eb1a35020b5c4c2676f7fe01d0"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00594ed3ef2218fee8c652d9e7f862fb39f8251b67c6379ef12f7e044bf6bbf3"}, + {file = "ijson-3.2.0.post0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:1a75cfb34217b41136b714985be645f12269e4345da35d7b48aabd317c82fd10"}, + {file = "ijson-3.2.0.post0.tar.gz", hash = "sha256:80a5bd7e9923cab200701f67ad2372104328b99ddf249dbbe8834102c852d316"}, ] [[package]] From 1b3343c4b4129bacf880d31b8510d8bedccbd194 
Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 10:32:15 +0000 Subject: [PATCH 085/344] Bump types-pyyaml from 6.0.12.2 to 6.0.12.3 (#14936) * Bump types-pyyaml from 6.0.12.2 to 6.0.12.3 Bumps [types-pyyaml](https://github.com/python/typeshed) from 6.0.12.2 to 6.0.12.3. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pyyaml dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14936.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14936.misc diff --git a/changelog.d/14936.misc b/changelog.d/14936.misc new file mode 100644 index 000000000000..bd5001173f7a --- /dev/null +++ b/changelog.d/14936.misc @@ -0,0 +1 @@ +Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. diff --git a/poetry.lock b/poetry.lock index b3a49c1d89a8..c77b07cb4eed 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2637,14 +2637,14 @@ types-cryptography = "*" [[package]] name = "types-pyyaml" -version = "6.0.12.2" +version = "6.0.12.3" description = "Typing stubs for PyYAML" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-PyYAML-6.0.12.2.tar.gz", hash = "sha256:6840819871c92deebe6a2067fb800c11b8a063632eb4e3e755914e7ab3604e83"}, - {file = "types_PyYAML-6.0.12.2-py3-none-any.whl", hash = "sha256:1e94e80aafee07a7e798addb2a320e32956a373f376655128ae20637adb2655b"}, + {file = "types-PyYAML-6.0.12.3.tar.gz", hash = "sha256:17ce17b3ead8f06e416a3b1d5b8ddc6cb82a422bb200254dd8b469434b045ffc"}, + {file = "types_PyYAML-6.0.12.3-py3-none-any.whl", hash = "sha256:879700e9f215afb20ab5f849590418ab500989f83a57e635689e1d50ccc63f0c"}, ] [[package]] From ed2b17bb9f03683fa402d77fba52b2aebc45485b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 10:32:27 +0000 Subject: [PATCH 086/344] Bump types-jsonschema from 4.17.0.2 to 4.17.0.3 (#14937) * Bump types-jsonschema from 4.17.0.2 to 4.17.0.3 Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.17.0.2 to 4.17.0.3. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14937.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14937.misc diff --git a/changelog.d/14937.misc b/changelog.d/14937.misc new file mode 100644 index 000000000000..061568f010ac --- /dev/null +++ b/changelog.d/14937.misc @@ -0,0 +1 @@ +Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. 
diff --git a/poetry.lock b/poetry.lock index c77b07cb4eed..5ff87ef05856 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2574,14 +2574,14 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.17.0.2" +version = "4.17.0.3" description = "Typing stubs for jsonschema" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-jsonschema-4.17.0.2.tar.gz", hash = "sha256:8b9e1140d4d780f0f19b5cab1b8a3732e8dd5e49dbc1f174cc0b499125ca6f6c"}, - {file = "types_jsonschema-4.17.0.2-py3-none-any.whl", hash = "sha256:8fd2f9aea4da54f9a811baa6963aac10fd680c18baa6237392c079b97d152738"}, + {file = "types-jsonschema-4.17.0.3.tar.gz", hash = "sha256:746aa466ffed9a1acc7bdbd0ac0b5e068f00be2ee008c1d1e14b0944a8c8b24b"}, + {file = "types_jsonschema-4.17.0.3-py3-none-any.whl", hash = "sha256:c8d5b26b7c8da6a48d7fb1ce029b97e0ff6e74db3727efb968c69f39ad013685"}, ] [[package]] From 43c7d814e67391949411bee2e03e174952c224c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 30 Jan 2023 10:32:51 +0000 Subject: [PATCH 087/344] Bump types-pillow from 9.4.0.3 to 9.4.0.5 (#14938) * Bump types-pillow from 9.4.0.3 to 9.4.0.5 Bumps [types-pillow](https://github.com/python/typeshed) from 9.4.0.3 to 9.4.0.5. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pillow dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14938.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14938.misc diff --git a/changelog.d/14938.misc b/changelog.d/14938.misc new file mode 100644 index 000000000000..f2fbabe8bb07 --- /dev/null +++ b/changelog.d/14938.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.4.0.3 to 9.4.0.5. 
diff --git a/poetry.lock b/poetry.lock index 5ff87ef05856..99628643f0d7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2598,14 +2598,14 @@ files = [ [[package]] name = "types-pillow" -version = "9.4.0.3" +version = "9.4.0.5" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.4.0.3.tar.gz", hash = "sha256:eba8ff24457a1b8669b6099793f3d313d034d407ee9f6e5fdf12c86cf54914cd"}, - {file = "types_Pillow-9.4.0.3-py3-none-any.whl", hash = "sha256:f8f16a54ed315144296864df11f14beca82ec0990ea83710b7eac7eb1bb38971"}, + {file = "types-Pillow-9.4.0.5.tar.gz", hash = "sha256:941cefaac2f5297d7d2a9989633c95b4063112690dc21c965d46bd5a7fff3c76"}, + {file = "types_Pillow-9.4.0.5-py3-none-any.whl", hash = "sha256:a1d2b3e070b4d852af04f76f018d12bd51abb4abca3b725d91b35e01cda7a2de"}, ] [[package]] From cbb0ee43cc6ebb144de989421053d5a8d72cfe31 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 30 Jan 2023 21:27:52 +0000 Subject: [PATCH 088/344] Initial batch of notes on faster joins (#14677) Co-authored-by: Olivier Wilkinson (reivilibre) Co-authored-by: Shay --- changelog.d/14677.doc | 1 + docs/SUMMARY.md | 1 + .../synapse_architecture/faster_joins.md | 375 ++++++++++++++++++ 3 files changed, 377 insertions(+) create mode 100644 changelog.d/14677.doc create mode 100644 docs/development/synapse_architecture/faster_joins.md diff --git a/changelog.d/14677.doc b/changelog.d/14677.doc new file mode 100644 index 000000000000..7c275c77a896 --- /dev/null +++ b/changelog.d/14677.doc @@ -0,0 +1 @@ +Describe the ideas and the internal machinery behind faster joins. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8d68719958d6..ade77d49261c 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -97,6 +97,7 @@ - [Log Contexts](log_contexts.md) - [Replication](replication.md) - [TCP Replication](tcp_replication.md) + - [Faster remote joins](development/synapse_architecture/faster_joins.md) - [Internal Documentation](development/internal_documentation/README.md) - [Single Sign-On]() - [SAML](development/saml.md) diff --git a/docs/development/synapse_architecture/faster_joins.md b/docs/development/synapse_architecture/faster_joins.md new file mode 100644 index 000000000000..c32d713b8af7 --- /dev/null +++ b/docs/development/synapse_architecture/faster_joins.md @@ -0,0 +1,375 @@ +# How do faster joins work? + +This is a work-in-progress set of notes with two goals: +- act as a reference, explaining how Synapse implements faster joins; and +- record the rationale behind our choices. + +See also [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902). + +The key idea is described by [MSC706](https://github.com/matrix-org/matrix-spec-proposals/pull/3902). This allows servers to +request a lightweight response to the federation `/send_join` endpoint. +This is called a **faster join**, also known as a **partial join**. In these +notes we'll usually use the word "partial" as it matches the database schema. + +## Overview: processing events in a partially-joined room + +The response to a partial join consists of +- the requested join event `J`, +- a list of the servers in the room (according to the state before `J`), +- a subset of the state of the room before `J`, +- the full auth chain of that state subset. + +Synapse marks the room as partially joined by adding a row to the database table +`partial_state_rooms`. 
It also marks the join event `J` as "partially stated",
+meaning that we have neither received nor computed the full state before/after
+`J`. This is done by adding a row to `partial_state_events`.
+
+<details><summary>DB schema</summary>
+
+```
+matrix=> \d partial_state_events
+Table "matrix.partial_state_events"
+  Column  │ Type │ Collation │ Nullable │ Default
+══════════╪══════╪═══════════╪══════════╪═════════
+ room_id  │ text │           │ not null │
+ event_id │ text │           │ not null │
+
+matrix=> \d partial_state_rooms
+                 Table "matrix.partial_state_rooms"
+         Column         │  Type  │ Collation │ Nullable │ Default
+════════════════════════╪════════╪═══════════╪══════════╪═════════
+ room_id                │ text   │           │ not null │
+ device_lists_stream_id │ bigint │           │ not null │ 0
+ join_event_id          │ text   │           │          │
+ joined_via             │ text   │           │          │
+
+matrix=> \d partial_state_rooms_servers
+    Table "matrix.partial_state_rooms_servers"
+   Column    │ Type │ Collation │ Nullable │ Default
+═════════════╪══════╪═══════════╪══════════╪═════════
+ room_id     │ text │           │ not null │
+ server_name │ text │           │ not null │
+```
+
+Indices, foreign-keys and check constraints are omitted for brevity.
+</details>
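Conceptually, the "marking" described above amounts to inserting rows into these tables. A rough sketch of that bookkeeping, using Synapse's generic `simple_insert` helper (illustrative only; the real code path runs inside a larger persistence transaction):

```python
# Sketch: record that `room_id` was joined via a partial join, and that the
# join event itself only has partial state. Illustrative, not Synapse's
# actual implementation.
async def mark_room_partially_joined(
    store, room_id: str, join_event_id: str, joined_via: str
) -> None:
    await store.db_pool.simple_insert(
        table="partial_state_rooms",
        values={
            "room_id": room_id,
            "device_lists_stream_id": 0,
            "join_event_id": join_event_id,
            "joined_via": joined_via,
        },
        desc="mark_room_partially_joined",
    )
    await store.db_pool.simple_insert(
        table="partial_state_events",
        values={"room_id": room_id, "event_id": join_event_id},
        desc="mark_join_event_partial_state",
    )
```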
+ +While partially joined to a room, Synapse receives events `E` from remote +homeservers as normal, and can create events at the request of its local users. +However, we run into trouble when we enforce the [checks on an event]. + +> 1. Is a valid event, otherwise it is dropped. For an event to be valid, it + must contain a room_id, and it must comply with the event format of that +> room version. +> 2. Passes signature checks, otherwise it is dropped. +> 3. Passes hash checks, otherwise it is redacted before being processed further. +> 4. Passes authorization rules based on the event’s auth events, otherwise it +> is rejected. +> 5. **Passes authorization rules based on the state before the event, otherwise +> it is rejected.** +> 6. **Passes authorization rules based on the current state of the room, +> otherwise it is “soft failed”.** + +[checks on an event]: https://spec.matrix.org/v1.5/server-server-api/#checks-performed-on-receipt-of-a-pdu + +We can enforce checks 1--4 without any problems. +But we cannot enforce checks 5 or 6 with complete certainty, since Synapse does +not know the full state before `E`, nor that of the room. + +### Partial state + +Instead, we make a best-effort approximation. +While the room is considered partially joined, Synapse tracks the "partial +state" before events. +This works in a similar way as regular state: + +- The partial state before `J` is that given to us by the partial join response. +- The partial state before an event `E` is the resolution of the partial states + after each of `E`'s `prev_event`s. +- If `E` is rejected or a message event, the partial state after `E` is the + partial state before `E`. +- Otherwise, the partial state after `E` is the partial state before `E`, plus + `E` itself. + +More concisely, partial state propagates just like full state; the only +difference is that we "seed" it with an incomplete initial state. +Synapse records that we have only calculated partial state for this event with +a row in `partial_state_events`. + +While the room remains partially stated, check 5 on incoming events to that +room becomes: + +> 5. Passes authorization rules based on **the resolution between the partial +> state before `E` and `E`'s auth events.** If the event fails to pass +> authorization rules, it is rejected. + +Additionally, check 6 is deleted: no soft-failures are enforced. + +While partially joined, the current partial state of the room is defined as the +resolution across the partial states after all forward extremities in the room. + +_Remark._ Events with partial state are _not_ considered +[outliers](../room-dag-concepts.md#outliers). + +### Approximation error + +Using partial state means the auth checks can fail in a few different ways[^2]. + +[^2]: Is this exhaustive? + +- We may erroneously accept an incoming event in check 5 based on partial state + when it would have been rejected based on full state, or vice versa. +- This means that an event could erroneously be added to the current partial + state of the room when it would not be present in the full state of the room, + or vice versa. +- Additionally, we may have skipped soft-failing an event that would have been + soft-failed based on full state. + +(Note that the discrepancies described in the last two bullets are user-visible.) + +This means that we have to be very careful when we want to lookup pieces of room +state in a partially-joined room. Our approximation of the state may be +incorrect or missing. But we can make some educated guesses. 
If

+
+- our partial state is likely to be correct, or
+- the consequences of our partial state being incorrect are minor,
+
+then we proceed as normal, and let the resync process fix up any mistakes (see
+below).
+
+When is our partial state likely to be correct?
+
+- It's more accurate the closer we are to the partial join event. (So we should
+  ideally complete the resync as soon as possible.)
+- Non-member events: we will have received them as part of the partial join
+  response, if they were part of the room state at that point. We may
+  incorrectly accept or reject updates to that state (at first because we lack
+  remote membership information; later because of compounding errors), so these
+  can become incorrect over time.
+- Local members' memberships: we are the only ones who can create join and
+  knock events for our users. We can't be completely confident in the
+  correctness of bans, invites and kicks from other homeservers, but the resync
+  process should correct any mistakes.
+- Remote members' memberships: we did not receive these in the /send_join
+  response, so we have essentially no idea if these are correct or not.
+
+In short, we deem it acceptable to trust the partial state for non-membership
+and local membership events. For remote membership events, we wait for the
+resync to complete, at which point we have the full state of the room and can
+proceed as normal.
+
+### Fixing the approximation with a resync
+
+The partial-state approximation is only a temporary affair. In the background,
+Synapse begins a "resync" process. This is a continuous loop, starting at the
+partial join event and proceeding downwards through the event graph. For each
+`E` seen in the room since the partial join, Synapse will fetch
+
+- the event ids in the state of the room before `E`, via
+  [`/state_ids`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1state_idsroomid);
+- the event ids in the full auth chain of `E`, included in the `/state_ids`
+  response; and
+- any events from the previous two bullets that Synapse hasn't persisted, via
+  [`/state`](https://spec.matrix.org/v1.5/server-server-api/#get_matrixfederationv1stateroomid).
+
+This means Synapse has (or can compute) the full state before `E`, which allows
+Synapse to properly authorise or reject `E`. At this point, the event
+is considered to have "full state" rather than "partial state". We record this
+by removing `E` from the `partial_state_events` table.
+
+\[**TODO:** Does Synapse persist a new state group for the full state
+before `E`, or do we alter the (partial-)state group in-place? Are state groups
+ever marked as partially-stated? \]
+
+This scheme means it is possible for us to have accepted and sent an event to
+clients, only to reject it during the resync. From a client's perspective, the
+effect is similar to a retroactive
+state change due to state resolution---i.e. a "state reset".[^3]
+
+[^3]: Clients should refresh caches to detect such a change. Rumour has it that
+sliding sync will fix this.
+
+When all events since the join `J` have been fully stated, the room resync
+process is complete. We record this by removing the room from
+`partial_state_rooms`. (A rough sketch of the whole loop is given below, after
+the note on workers.)
+
+## Faster joins on workers
+
+For the time being, the resync process happens on the master worker.
+A new replication stream `un_partial_stated_room` is added. Whenever a resync
+completes and a partial-state room becomes fully stated, a new message is sent
+into that stream containing the room ID.
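As promised above, here is a very rough sketch of the resync loop in Python. Every helper name in it (`events_since_partial_join`, `fetch_state_ids`, and so on) is invented purely for illustration; the real implementation lives in Synapse's federation machinery and is considerably more involved.

```python
# Illustrative pseudocode only: all helpers below are hypothetical, not Synapse APIs.
async def resync_partial_state_room(room_id: str) -> None:
    for event in await events_since_partial_join(room_id):
        # /state_ids gives us the IDs of the state before `event` and of its
        # full auth chain.
        state_ids, auth_chain_ids = await fetch_state_ids(room_id, event.event_id)

        # Fetch (via /state) any of those events that we haven't persisted yet.
        missing = [e for e in [*state_ids, *auth_chain_ids] if not await have_event(e)]
        await fetch_missing_events(room_id, missing)

        # We can now compute the full state before `event`, so re-run the
        # state-based auth checks (checks 5 and 6) properly.
        await recheck_auth_with_full_state(event)

        # Record that `event` is fully stated: delete its partial_state_events row.
        await mark_event_un_partial_stated(event.event_id)

    # Every event since the join is now fully stated, so the room is too:
    # delete its partial_state_rooms row and notify workers over the
    # `un_partial_stated_room` replication stream.
    await mark_room_un_partial_stated(room_id)
```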
+
+## Notes on specific cases
+
+> **NB.** The notes below are rough. Some of them are hidden under `<details>`
+disclosures because they have yet to be implemented in mainline Synapse.
+
+### Creating events during a partial join
+
+When sending out messages during a partial join, we assume our partial state is
+accurate and proceed as normal. For this to have any hope of succeeding at all,
+our partial state must contain an entry for each of the (type, state key) pairs
+[specified by the auth rules](https://spec.matrix.org/v1.3/rooms/v10/#authorization-rules):
+
+- `m.room.create`
+- `m.room.join_rules`
+- `m.room.power_levels`
+- `m.room.third_party_invite`
+- `m.room.member`
+
+The first four of these should be present in the state before `J` that is given
+to us in the partial join response; only membership events are omitted. In order
+for us to consider the user joined, we must have their membership event. That
+means the only possible omission is the target's membership in an invite, kick
+or ban. (A rough sketch of this requirement appears at the end of this section.)
+
+The worst possibility is that we locally invite someone who is banned according to
+the full state, because we lack their ban in our current partial state. The rest
+of the federation---at least, those who are fully joined---should correctly
+enforce the [membership transition constraints](
+    https://spec.matrix.org/v1.3/client-server-api/#room-membership
+). So the erroneous invite should be ignored by fully-joined
+homeservers and resolved by the resync for partially-joined homeservers.
+
+
+
+More generally, there are two problems we're worrying about here:
+
+- We might create an event that is valid under our partial state, only to later
+  find out that it is actually invalid according to the full state.
+- Or: we might refuse to create an event that is invalid under our partial
+  state, even though it would be perfectly valid under the full state.
+
+However we expect such problems to be unlikely in practice, because
+
+- We trust that the room has sensible power levels, e.g. that bad actors with
+  high power levels are demoted before their ban.
+- We trust that the resident server provides us up-to-date power levels, join
+  rules, etc.
+- State changes in rooms are relatively infrequent, and the resync period is
+  relatively quick.
+
+#### Sending out the event over federation
+
+**TODO:** needs prose fleshing out.
+
+Normally: send out in a fed txn to all HSes in the room.
+We only know that some HSes were in the room at some point. Wat do.
+Send it out to the list of servers from the first join.
+**TODO** what do we do here if we have full state?
+If the prev event was created by us, we can risk sending it to the wrong HS. (Motivation: privacy concern of the content. Not such a big deal for a public room or an encrypted room. But non-encrypted invite-only...)
+But don't want to send out sensitive data in other HS's events in this way.
+
+Suppose we discover after resync that we shouldn't have sent out one of our events (not a prev_event) to a target HS. Not much we can do.
+What about if we didn't send them an event but should have?
+E.g. what if someone joined from a new HS shortly after you did? We wouldn't talk to them.
+Could imagine sending out the "missed" events after the resync but... painful to work out what they should have seen if they joined/left.
+Instead, just send them the latest event (if they're still in the room after resync) and let them backfill. (?)
+- Don't do this currently.
+- If anyone who has received our messages sends a message to a HS we missed, they can backfill our messages
+- Gap: rooms which are infrequently used and take a long time to resync.
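The sketch promised above: a toy check (not Synapse's real code) that our partial state contains the entries the auth rules will need before we create an event. It deliberately simplifies: `m.room.third_party_invite` only matters for invites created from a third-party invite, and membership events additionally need the target's membership.

```python
from typing import Dict, Tuple


def partial_state_covers_event_creation(
    partial_state: Dict[Tuple[str, str], str], sender: str
) -> bool:
    """Toy check: does our (partial) state map contain the (type, state_key)
    pairs that the auth rules will consult when `sender` creates an event?
    """
    required = [
        ("m.room.create", ""),
        ("m.room.join_rules", ""),
        ("m.room.power_levels", ""),
        ("m.room.member", sender),  # the sender's own membership
    ]
    return all(key in partial_state for key in required)
```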
+ +### Joining after a partial join + +**NB.** Not yet implemented. + +
+
+**TODO:** needs prose fleshing out. Liaise with Matthieu. Explain why /send_join
+(Rich was surprised we didn't just create it locally. Answer: to try and avoid
+a join which then gets rejected after resync.)
+
+We don't know for sure that any join we create would be accepted.
+E.g. the joined user might have been banned; the join rules might have changed in a way that we didn't realise... some way in which the partial state was mistaken.
+Instead, do another partial make-join/send-join handshake to confirm that the join works.
+- Probably going to get a bunch of duplicate state events and auth events.... but the point of partial joins is that these should be small. Many are already persisted = good.
+- What if the second send_join response includes a different list of resident HSes? Could ignore it.
+  - Could even have a special flag that says "just make me a join", i.e. don't bother giving me state or servers in room. Deffo want the auth chain tho.
+- SQ: wrt device lists it's a lot safer to ignore it!!!!!
+- What if the state at the second join is inconsistent with what we have? Ignore it?
+
+ +### Leaving (and kicks and bans) after a partial join + +**NB.** Not yet implemented. + +
+
+When you're fully joined to a room, to have `U` leave, their homeserver
+needs to
+
+- create a new leave event for `U` which will be accepted by other homeservers,
+  and
+- send that event out to the homeservers in the federation.
+
+When is a leave event accepted? See
+[v10 auth rules](https://spec.matrix.org/v1.5/rooms/v10/#authorization-rules):
+
+> 4. If type is m.room.member: [...]
+>
+>    5. If membership is leave:
+>
+>       1. If the sender matches state_key, allow if and only if that user’s current membership state is invite, join, or knock.
+>       2. [...]
+
+I think this means that (well-formed!) self-leaves are governed entirely by
+4.5.1. This means that if we correctly calculate state which says that `U` is
+invited, joined or knocked and include it in the leave's auth events, our event
+is accepted by checks 4 and 5 on incoming events.
+
+> 4. Passes authorization rules based on the event’s auth events, otherwise
+>    it is rejected.
+> 5. Passes authorization rules based on the state before the event, otherwise
+>    it is rejected.
+
+The only way to fail check 6 is if the receiving server's current state of the
+room says that `U` is banned, has left, or has no membership event. But this is
+fine: the receiving server already thinks that `U` isn't in the room.
+
+> 6. Passes authorization rules based on the current state of the room,
+>    otherwise it is “soft failed”.
+
+For the second point (publishing the leave event), the best thing we can do is
+publish it to all HSes we know to be currently in the room. If they miss that
+event, they might send us traffic in the room that we don't care about. This is
+a problem with leaving after a "full" join; we don't seek to fix this with
+partial joins.
+
+(With that said: there's nothing machine-readable in the /send response. I don't
+think we can deduce "destination has left the room" from a failure to /send an
+event into that room?)
+
+#### Can we still do this during a partial join?
+
+We can create leave events and can choose what gets included in our auth events,
+so we can be sure that we pass check 4 on incoming events. For check 5, we might
+have an incorrect view of the state before an event.
+The only way we might erroneously think a leave is valid is if
+
+- the partial state before the leave has `U` joined, invited or knocked, but
+- the full state before the leave has `U` banned, left or not present,
+
+in which case the leave doesn't make anything worse: other HSes already consider
+us as not in the room, and will continue to do so after seeing the leave. (A toy
+version of the relevant auth check is sketched at the end of this section.)
+
+The remaining obstacle is then: can we safely broadcast the leave event? We may
+miss servers or incorrectly think that a server is in the room. Or the
+destination server may be offline and miss the transaction containing our leave
+event. This should self-heal when they see an event whose `prev_events` descends
+from our leave.
+
+Another option we considered was to use federation `/send_leave` to ask a
+fully-joined server to send out the event on our behalf. But that introduces
+complexity without much benefit. Besides, as Rich put it,
+
+> sending out leaves is pretty best-effort currently
+
+so this is probably good enough as-is.
+
+#### Cleanup after the last leave
+
+**TODO**: what cleanup is necessary? Is it all just nice-to-have to save unused
+work?
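For completeness, here is a toy rendering (not Synapse's event-auth implementation) of the self-leave rule 4.5.1 discussed above. Under partial state we may feed it our possibly-wrong view of `U`'s membership, which, as argued, cannot make matters worse.

```python
from typing import Optional


def self_leave_allowed(current_membership: Optional[str]) -> bool:
    """Rule 4.5.1: a user's own leave is allowed iff they are currently
    invited, joined or knocking."""
    return current_membership in ("invite", "join", "knock")


assert self_leave_allowed("join")
assert not self_leave_allowed("ban")   # already banned: the self-leave is rejected...
assert not self_leave_allowed(None)    # ...as is leaving a room you were never in.
```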
From 510d4b06e7d346b4f94cb5598da90c9f668b62bb Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 30 Jan 2023 21:29:30 +0000 Subject: [PATCH 089/344] Handle malformed values of `notification.room` in power level events (#14942) * Better test for bad values in power levels events The previous test only checked that Synapse didn't raise an exception, but didn't check that we had correctly interpreted the value of the dodgy power level. It also conflated two things: bad room notification levels, and bad user levels. There _is_ logic for converting the latter to integers, but we should test it separately. * Check we ignore types that don't convert to int * Handle `None` values in `notifications.room` * Changelog * Also test that bad values are rejected by event auth * Docstring * linter scripttttttttt --- changelog.d/14942.bugfix | 1 + synapse/push/bulk_push_rule_evaluator.py | 19 ++++- tests/push/test_bulk_push_rule_evaluator.py | 93 ++++++++++++++++++--- tests/test_event_auth.py | 32 ++++++- 4 files changed, 128 insertions(+), 17 deletions(-) create mode 100644 changelog.d/14942.bugfix diff --git a/changelog.d/14942.bugfix b/changelog.d/14942.bugfix new file mode 100644 index 000000000000..a3ca3eb7e9d9 --- /dev/null +++ b/changelog.d/14942.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index deaec1956472..88cfc05d0552 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -69,6 +69,9 @@ } +SENTINEL = object() + + def _should_count_as_unread(event: EventBase, context: EventContext) -> bool: # Exclude rejected and soft-failed events. if context.rejected or event.internal_metadata.is_soft_failed(): @@ -343,11 +346,21 @@ async def _action_for_event_by_user( related_events = await self._related_events(event) # It's possible that old room versions have non-integer power levels (floats or - # strings). Workaround this by explicitly converting to int. + # strings; even the occasional `null`). For old rooms, we interpret these as if + # they were integers. Do this here for the `@room` power level threshold. + # Note that this is done automatically for the sender's power level by + # _get_power_levels_and_sender_level in its call to get_user_power_level + # (even for room V10.) notification_levels = power_levels.get("notifications", {}) if not event.room_version.msc3667_int_only_power_levels: - for user_id, level in notification_levels.items(): - notification_levels[user_id] = int(level) + keys = list(notification_levels.keys()) + for key in keys: + level = notification_levels.get(key, SENTINEL) + if level is not SENTINEL and type(level) is not int: + try: + notification_levels[key] = int(level) + except (TypeError, ValueError): + del notification_levels[key] # Pull out any user and room mentions. 
mentions = event.content.get(EventContentFields.MSC3952_MENTIONS) diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index aba62b5dc8fe..fda48d9f61db 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -15,6 +15,8 @@ from typing import Any from unittest.mock import patch +from parameterized import parameterized + from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventContentFields @@ -48,35 +50,84 @@ def prepare( self.requester = create_requester(self.alice) self.room_id = self.helper.create_room_as( - self.alice, room_version=RoomVersions.V9.identifier, tok=self.token + # This is deliberately set to V9, because we want to test the logic which + # handles stringy power levels. Stringy power levels were outlawed in V10. + self.alice, + room_version=RoomVersions.V9.identifier, + tok=self.token, ) self.event_creation_handler = self.hs.get_event_creation_handler() - def test_action_for_event_by_user_handles_noninteger_power_levels(self) -> None: - """We should convert floats and strings to integers before passing to Rust. + @parameterized.expand( + [ + # The historically-permitted bad values. Alice's notification should be + # allowed if this threshold is at or below her power level (60) + ("100", False), + ("0", True), + (12.34, True), + (60.0, True), + (67.89, False), + # Values that int(...) would not successfully cast should be ignored. + # The room notification level should then default to 50, per the spec, so + # Alice's notification is allowed. + (None, True), + # We haven't seen `"room": []` or `"room": {}` in the wild (yet), but + # let's check them for paranoia's sake. + ([], True), + ({}, True), + ] + ) + def test_action_for_event_by_user_handles_noninteger_room_power_levels( + self, bad_room_level: object, should_permit: bool + ) -> None: + """We should convert strings in `room` to integers before passing to Rust. + + Test this as follows: + - Create a room as Alice and invite two other users Bob and Charlie. + - Set PLs so that Alice has PL 60 and `notifications.room` is set to a bad value. + - Have Alice create a message notifying @room. + - Evaluate notification actions for that message. This should not raise. + - Look in the DB to see if that message triggered a highlight for Bob. + + The test is parameterised with two arguments: + - the bad power level value for "room", before JSON serisalistion + - whether Bob should expect the message to be highlighted Reproduces #14060. A lack of validation: the gift that keeps on giving. """ - - # Alter the power levels in that room to include stringy and floaty levels. - # We need to suppress the validation logic or else it will reject these dodgy - # values. (Presumably this validation was not always present.) + # Join another user to the room, so that there is someone to see Alice's + # @room notification. + bob = self.register_user("bob", "pass") + bob_token = self.login(bob, "pass") + self.helper.join(self.room_id, bob, tok=bob_token) + + # Alter the power levels in that room to include the bad @room notification + # level. We need to suppress + # + # - canonicaljson validation, because canonicaljson forbids floats; + # - the event jsonschema validation, because it will forbid bad values; and + # - the auth rules checks, because they stop us from creating power levels + # with `"room": null`. (We want to test this case, because we have seen it + # in the wild.) 
+ # + # We have seen stringy and null values for "room" in the wild, so presumably + # some of this validation was missing in the past. with patch("synapse.events.validator.validate_canonicaljson"), patch( "synapse.events.validator.jsonschema.validate" - ): - self.helper.send_state( + ), patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"): + pl_event_id = self.helper.send_state( self.room_id, "m.room.power_levels", { - "users": {self.alice: "100"}, # stringy - "notifications": {"room": 100.0}, # float + "users": {self.alice: 60}, + "notifications": {"room": bad_room_level}, }, self.token, state_key="", - ) + )["event_id"] # Create a new message event, and try to evaluate it under the dodgy # power level event. @@ -88,10 +139,11 @@ def test_action_for_event_by_user_handles_noninteger_power_levels(self) -> None: "room_id": self.room_id, "content": { "msgtype": "m.text", - "body": "helo", + "body": "helo @room", }, "sender": self.alice, }, + prev_event_ids=[pl_event_id], ) ) @@ -99,6 +151,21 @@ def test_action_for_event_by_user_handles_noninteger_power_levels(self) -> None: # should not raise self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) + # Did Bob see Alice's @room notification? + highlighted_actions = self.get_success( + self.hs.get_datastores().main.db_pool.simple_select_list( + table="event_push_actions_staging", + keyvalues={ + "event_id": event.event_id, + "user_id": bob, + "highlight": 1, + }, + retcols=("*",), + desc="get_event_push_actions_staging", + ) + ) + self.assertEqual(len(highlighted_actions), int(should_permit)) + @override_config({"push": {"enabled": False}}) def test_action_for_event_by_user_disabled_by_config(self) -> None: """Ensure that push rules are not calculated when disabled in the config""" diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index f4d9fba0a14c..0a7937f1cc72 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -13,7 +13,7 @@ # limitations under the License. import unittest -from typing import Collection, Dict, Iterable, List, Optional +from typing import Any, Collection, Dict, Iterable, List, Optional from parameterized import parameterized @@ -728,6 +728,36 @@ def test_room_v10_rejects_string_power_levels(self) -> None: pl_event.room_version, pl_event2, {("fake_type", "fake_key"): pl_event} ) + def test_room_v10_rejects_other_non_integer_power_levels(self) -> None: + """We should reject PLs that are non-integer, non-string JSON values. + + test_room_v10_rejects_string_power_levels above handles the string case. 
+ """ + + def create_event(pl_event_content: Dict[str, Any]) -> EventBase: + return make_event_from_dict( + { + "room_id": TEST_ROOM_ID, + **_maybe_get_event_id_dict_for_room_version(RoomVersions.V10), + "type": "m.room.power_levels", + "sender": "@test:test.com", + "state_key": "", + "content": pl_event_content, + "signatures": {"test.com": {"ed25519:0": "some9signature"}}, + }, + room_version=RoomVersions.V10, + ) + + contents: Iterable[Dict[str, Any]] = [ + {"notifications": {"room": None}}, + {"users": {"@alice:wonderland": []}}, + {"users_default": {}}, + ] + for content in contents: + event = create_event(content) + with self.assertRaises(SynapseError): + event_auth._check_power_levels(event.room_version, event, {}) + # helpers for making events TEST_DOMAIN = "example.com" From 796a4b74823b721c72de07e45718f05e78e1565d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 31 Jan 2023 10:33:07 +0000 Subject: [PATCH 090/344] Prefer `type(x) is int` to `isinstance(x, int)` (#14945) * Perfer `type(x) is int` to `isinstance(x, int)` This covered all additional instances I could see where `x` was user-controlled. The remaining cases are ``` $ rg -s 'isinstance.*[^_]int' tests/replication/_base.py 576: if isinstance(obj, int): synapse/util/caches/stream_change_cache.py 136: assert isinstance(stream_pos, int) 214: assert isinstance(stream_pos, int) 246: assert isinstance(stream_pos, int) 267: assert isinstance(stream_pos, int) synapse/replication/tcp/external_cache.py 133: if isinstance(result, int): synapse/metrics/__init__.py 100: if isinstance(calls, (int, float)): synapse/handlers/appservice.py 262: assert isinstance(new_token, int) synapse/config/_util.py 62: if isinstance(p, int): ``` which cover metrics, logic related to `jsonschema`, and replication and data streams. AFAICS these are all internal to Synapse * Changelog --- changelog.d/14945.misc | 1 + synapse/config/_base.py | 72 ++++++++++++++++------- synapse/config/cache.py | 4 +- synapse/config/server.py | 2 +- synapse/events/validator.py | 4 +- synapse/federation/federation_client.py | 2 +- synapse/handlers/message.py | 2 +- synapse/rest/admin/__init__.py | 2 +- synapse/rest/admin/registration_tokens.py | 15 +++-- synapse/rest/admin/users.py | 6 +- synapse/rest/client/report_event.py | 2 +- synapse/rest/media/v1/oembed.py | 2 +- synapse/rest/media/v1/thumbnailer.py | 2 +- synapse/storage/databases/main/events.py | 6 +- 14 files changed, 75 insertions(+), 47 deletions(-) create mode 100644 changelog.d/14945.misc diff --git a/changelog.d/14945.misc b/changelog.d/14945.misc new file mode 100644 index 000000000000..654174f9a81f --- /dev/null +++ b/changelog.d/14945.misc @@ -0,0 +1 @@ +Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 1f6362aedd56..2ce60610ca6f 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -174,15 +174,29 @@ def __init__(self, root_config: "RootConfig" = None): @staticmethod def parse_size(value: Union[str, int]) -> int: - if isinstance(value, int): + """Interpret `value` as a number of bytes. + + If an integer is provided it is treated as bytes and is unchanged. + + String byte sizes can have a suffix of 'K' or `M`, representing kibibytes and + mebibytes respectively. No suffix is understood as a plain byte count. 
+ + Raises: + TypeError, if given something other than an integer or a string + ValueError: if given a string not of the form described above. + """ + if type(value) is int: return value - sizes = {"K": 1024, "M": 1024 * 1024} - size = 1 - suffix = value[-1] - if suffix in sizes: - value = value[:-1] - size = sizes[suffix] - return int(value) * size + elif type(value) is str: + sizes = {"K": 1024, "M": 1024 * 1024} + size = 1 + suffix = value[-1] + if suffix in sizes: + value = value[:-1] + size = sizes[suffix] + return int(value) * size + else: + raise TypeError(f"Bad byte size {value!r}") @staticmethod def parse_duration(value: Union[str, int]) -> int: @@ -198,22 +212,36 @@ def parse_duration(value: Union[str, int]) -> int: Returns: The number of milliseconds in the duration. + + Raises: + TypeError, if given something other than an integer or a string + ValueError: if given a string not of the form described above. """ - if isinstance(value, int): + if type(value) is int: return value - second = 1000 - minute = 60 * second - hour = 60 * minute - day = 24 * hour - week = 7 * day - year = 365 * day - sizes = {"s": second, "m": minute, "h": hour, "d": day, "w": week, "y": year} - size = 1 - suffix = value[-1] - if suffix in sizes: - value = value[:-1] - size = sizes[suffix] - return int(value) * size + elif type(value) is str: + second = 1000 + minute = 60 * second + hour = 60 * minute + day = 24 * hour + week = 7 * day + year = 365 * day + sizes = { + "s": second, + "m": minute, + "h": hour, + "d": day, + "w": week, + "y": year, + } + size = 1 + suffix = value[-1] + if suffix in sizes: + value = value[:-1] + size = sizes[suffix] + return int(value) * size + else: + raise TypeError(f"Bad duration {value!r}") @staticmethod def abspath(file_path: str) -> str: diff --git a/synapse/config/cache.py b/synapse/config/cache.py index 015b2a138e85..05f69cb1bacc 100644 --- a/synapse/config/cache.py +++ b/synapse/config/cache.py @@ -126,7 +126,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: cache_config = config.get("caches") or {} self.global_factor = cache_config.get("global_factor", _DEFAULT_FACTOR_SIZE) - if not isinstance(self.global_factor, (int, float)): + if type(self.global_factor) not in (int, float): raise ConfigError("caches.global_factor must be a number.") # Load cache factors from the config @@ -151,7 +151,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: ) for cache, factor in individual_factors.items(): - if not isinstance(factor, (int, float)): + if type(factor) not in (int, float): raise ConfigError( "caches.per_cache_factors.%s must be a number" % (cache,) ) diff --git a/synapse/config/server.py b/synapse/config/server.py index 80bcfa40800c..ecdaa2d9dd24 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -904,7 +904,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type")) port = listener.get("port") - if not isinstance(port, int): + if type(port) is not int: raise ConfigError("Listener configuration is lacking a valid 'port' option") tls = listener.get("tls", False) diff --git a/synapse/events/validator.py b/synapse/events/validator.py index a6f0104396e5..fb1737b910e9 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -139,7 +139,7 @@ def _validate_retention(self, event: EventBase) -> None: max_lifetime = event.content.get("max_lifetime") if min_lifetime is not None: - if not isinstance(min_lifetime, int): + if 
type(min_lifetime) is not int: raise SynapseError( code=400, msg="'min_lifetime' must be an integer", @@ -147,7 +147,7 @@ def _validate_retention(self, event: EventBase) -> None: ) if max_lifetime is not None: - if not isinstance(max_lifetime, int): + if type(max_lifetime) is not int: raise SynapseError( code=400, msg="'max_lifetime' must be an integer", diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index f185b6c1f9a4..feb32e40e52f 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -1864,7 +1864,7 @@ def from_json_dict(cls, d: JsonDict) -> "TimestampToEventResponse": ) origin_server_ts = d.get("origin_server_ts") - if not isinstance(origin_server_ts, int): + if type(origin_server_ts) is not int: raise ValueError( "Invalid response: 'origin_server_ts' must be a int but received %r" % origin_server_ts diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 3278a695ed61..6290f7f52316 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -377,7 +377,7 @@ def maybe_schedule_expiry(self, event: EventBase) -> None: """ expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER) - if not isinstance(expiry_ts, int) or event.is_state(): + if type(expiry_ts) is not int or event.is_state(): return # _schedule_expiry_for_event won't actually schedule anything if there's already diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index fb73886df061..79f22a59f195 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -152,7 +152,7 @@ async def on_POST( logger.info("[purge] purging up to token %s (event_id %s)", token, event_id) elif "purge_up_to_ts" in body: ts = body["purge_up_to_ts"] - if not isinstance(ts, int): + if type(ts) is not int: raise SynapseError( HTTPStatus.BAD_REQUEST, "purge_up_to_ts must be an int", diff --git a/synapse/rest/admin/registration_tokens.py b/synapse/rest/admin/registration_tokens.py index af606e925216..95e751288b03 100644 --- a/synapse/rest/admin/registration_tokens.py +++ b/synapse/rest/admin/registration_tokens.py @@ -143,7 +143,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: else: # Get length of token to generate (default is 16) length = body.get("length", 16) - if not isinstance(length, int): + if type(length) is not int: raise SynapseError( HTTPStatus.BAD_REQUEST, "length must be an integer", @@ -163,8 +163,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: uses_allowed = body.get("uses_allowed", None) if not ( - uses_allowed is None - or (isinstance(uses_allowed, int) and uses_allowed >= 0) + uses_allowed is None or (type(uses_allowed) is int and uses_allowed >= 0) ): raise SynapseError( HTTPStatus.BAD_REQUEST, @@ -173,13 +172,13 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) expiry_time = body.get("expiry_time", None) - if not isinstance(expiry_time, (int, type(None))): + if type(expiry_time) not in (int, type(None)): raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must be an integer or null", Codes.INVALID_PARAM, ) - if isinstance(expiry_time, int) and expiry_time < self.clock.time_msec(): + if type(expiry_time) is int and expiry_time < self.clock.time_msec(): raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must not be in the past", @@ -284,7 +283,7 @@ async def on_PUT(self, request: SynapseRequest, token: str) -> Tuple[int, JsonDi uses_allowed = 
body["uses_allowed"] if not ( uses_allowed is None - or (isinstance(uses_allowed, int) and uses_allowed >= 0) + or (type(uses_allowed) is int and uses_allowed >= 0) ): raise SynapseError( HTTPStatus.BAD_REQUEST, @@ -295,13 +294,13 @@ async def on_PUT(self, request: SynapseRequest, token: str) -> Tuple[int, JsonDi if "expiry_time" in body: expiry_time = body["expiry_time"] - if not isinstance(expiry_time, (int, type(None))): + if type(expiry_time) not in (int, type(None)): raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must be an integer or null", Codes.INVALID_PARAM, ) - if isinstance(expiry_time, int) and expiry_time < self.clock.time_msec(): + if type(expiry_time) is int and expiry_time < self.clock.time_msec(): raise SynapseError( HTTPStatus.BAD_REQUEST, "expiry_time must not be in the past", diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 6e0c44be2abe..0841b89c1a66 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -973,7 +973,7 @@ async def on_POST( body = parse_json_object_from_request(request, allow_empty_body=True) valid_until_ms = body.get("valid_until_ms") - if valid_until_ms and not isinstance(valid_until_ms, int): + if type(valid_until_ms) not in (int, type(None)): raise SynapseError( HTTPStatus.BAD_REQUEST, "'valid_until_ms' parameter must be an int" ) @@ -1125,14 +1125,14 @@ async def on_POST( messages_per_second = body.get("messages_per_second", 0) burst_count = body.get("burst_count", 0) - if not isinstance(messages_per_second, int) or messages_per_second < 0: + if type(messages_per_second) is not int or messages_per_second < 0: raise SynapseError( HTTPStatus.BAD_REQUEST, "%r parameter must be a positive int" % (messages_per_second,), errcode=Codes.INVALID_PARAM, ) - if not isinstance(burst_count, int) or burst_count < 0: + if type(burst_count) is not int or burst_count < 0: raise SynapseError( HTTPStatus.BAD_REQUEST, "%r parameter must be a positive int" % (burst_count,), diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py index 6e962a45321e..e2b410cf32a3 100644 --- a/synapse/rest/client/report_event.py +++ b/synapse/rest/client/report_event.py @@ -54,7 +54,7 @@ async def on_POST( "Param 'reason' must be a string", Codes.BAD_JSON, ) - if not isinstance(body.get("score", 0), int): + if type(body.get("score", 0)) is not int: raise SynapseError( HTTPStatus.BAD_REQUEST, "Param 'score' must be an integer", diff --git a/synapse/rest/media/v1/oembed.py b/synapse/rest/media/v1/oembed.py index a3738a62507d..7592aa5d4767 100644 --- a/synapse/rest/media/v1/oembed.py +++ b/synapse/rest/media/v1/oembed.py @@ -200,7 +200,7 @@ def parse_oembed_response(self, url: str, raw_body: bytes) -> OEmbedResult: calc_description_and_urls(open_graph_response, oembed["html"]) for size in ("width", "height"): val = oembed.get(size) - if val is not None and isinstance(val, int): + if type(val) is int: open_graph_response[f"og:video:{size}"] = val elif oembed_type == "link": diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index a48a4de92ae2..9480cc576332 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -77,7 +77,7 @@ def __init__(self, input_path: str): image_exif = self.image._getexif() # type: ignore if image_exif is not None: image_orientation = image_exif.get(EXIF_ORIENTATION_TAG) - assert isinstance(image_orientation, int) + assert type(image_orientation) is int self.transpose_method = 
EXIF_TRANSPOSE_MAPPINGS.get(image_orientation) except Exception as e: # A lot of parsing errors can happen when parsing EXIF diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 0f097a2927c1..1536937b67e8 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1651,7 +1651,7 @@ def _update_metadata_tables_txn( if self._ephemeral_messages_enabled: # If there's an expiry timestamp on the event, store it. expiry_ts = event.content.get(EventContentFields.SELF_DESTRUCT_AFTER) - if isinstance(expiry_ts, int) and not event.is_state(): + if type(expiry_ts) is int and not event.is_state(): self._insert_event_expiry_txn(txn, event.event_id, expiry_ts) # Insert into the room_memberships table. @@ -2133,10 +2133,10 @@ def _store_retention_policy_for_room_txn( ): if ( "min_lifetime" in event.content - and not isinstance(event.content.get("min_lifetime"), int) + and type(event.content["min_lifetime"]) is not int ) or ( "max_lifetime" in event.content - and not isinstance(event.content.get("max_lifetime"), int) + and type(event.content["max_lifetime"]) is not int ): # Ignore the event if one of the value isn't an integer. return From a134e626e43e9c31a4618d4164ba7d6242c0f803 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 31 Jan 2023 10:57:02 +0000 Subject: [PATCH 091/344] Reject boolean power levels (#14944) * Better test for bad values in power levels events The previous test only checked that Synapse didn't raise an exception, but didn't check that we had correctly interpreted the value of the dodgy power level. It also conflated two things: bad room notification levels, and bad user levels. There _is_ logic for converting the latter to integers, but we should test it separately. * Check we ignore types that don't convert to int * Handle `None` values in `notifications.room` * Changelog * Also test that bad values are rejected by event auth * Docstring * linter scripttttttttt * Test boolean values in PL content * Reject boolean power levels * Changelog --- changelog.d/14944.bugfix | 1 + synapse/event_auth.py | 4 ++-- synapse/events/utils.py | 6 +++--- synapse/federation/federation_base.py | 2 +- 4 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/14944.bugfix diff --git a/changelog.d/14944.bugfix b/changelog.d/14944.bugfix new file mode 100644 index 000000000000..5fe1fb322b40 --- /dev/null +++ b/changelog.d/14944.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.64 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). diff --git a/synapse/event_auth.py b/synapse/event_auth.py index c4a7b16413c5..e0be9f88cc9d 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -875,11 +875,11 @@ def _check_power_levels( "kick", "invite", }: - if not isinstance(v, int): + if type(v) is not int: raise SynapseError(400, f"{v!r} must be an integer.") if k in {"events", "notifications", "users"}: if not isinstance(v, collections.abc.Mapping) or not all( - isinstance(v, int) for v in v.values() + type(v) is int for v in v.values() ): raise SynapseError( 400, diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 52e4b467e876..ebf8c7ed83ba 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -648,10 +648,10 @@ def _copy_power_level_value_as_integer( ) -> None: """Set `power_levels[key]` to the integer represented by `old_value`. 
- :raises TypeError: if `old_value` is not an integer, nor a base-10 string + :raises TypeError: if `old_value` is neither an integer nor a base-10 string representation of an integer. """ - if isinstance(old_value, int): + if type(old_value) is int: power_levels[key] = old_value return @@ -679,7 +679,7 @@ def validate_canonicaljson(value: Any) -> None: * Floats * NaN, Infinity, -Infinity """ - if isinstance(value, int): + if type(value) is int: if value < CANONICALJSON_MIN_INT or CANONICALJSON_MAX_INT < value: raise SynapseError(400, "JSON integer out of range", Codes.BAD_JSON) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 6bd4742140c4..29fae716f589 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -280,7 +280,7 @@ def event_from_pdu_json(pdu_json: JsonDict, room_version: RoomVersion) -> EventB _strip_unsigned_values(pdu_json) depth = pdu_json["depth"] - if not isinstance(depth, int): + if type(depth) is not int: raise SynapseError(400, "Depth %r not an intger" % (depth,), Codes.BAD_JSON) if depth < 0: From 6d14fdc2710688014a7a66cc48485462c6e86a1e Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 31 Jan 2023 11:03:55 +0000 Subject: [PATCH 092/344] Make sqlite database migrations transactional again, part two (#14926) #14910 fixed the regression introduced by #13873 where sqlite database migrations would no longer run inside a transaction. However, it committed the transaction before Synapse updated its bookkeeping of which migrations have been run, which means that migrations may be run again after they have completed successfully. Leave the transaction open at the end of `executescript`, to restore the old, correct behaviour. Also make the PostgreSQL behaviour consistent with SQLite. Fixes #14909. Signed-off-by: Sean Quah --- changelog.d/14926.bugfix | 1 + synapse/storage/engines/_base.py | 5 +- synapse/storage/engines/postgres.py | 6 +- synapse/storage/engines/sqlite.py | 6 +- tests/storage/test_database.py | 96 +++++++++++++++++++++++++++++ 5 files changed, 109 insertions(+), 5 deletions(-) create mode 100644 changelog.d/14926.bugfix diff --git a/changelog.d/14926.bugfix b/changelog.d/14926.bugfix new file mode 100644 index 000000000000..f1f34cd6badb --- /dev/null +++ b/changelog.d/14926.bugfix @@ -0,0 +1 @@ +Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. diff --git a/synapse/storage/engines/_base.py b/synapse/storage/engines/_base.py index bc9ca3a53c40..0363cdc038f0 100644 --- a/synapse/storage/engines/_base.py +++ b/synapse/storage/engines/_base.py @@ -133,8 +133,9 @@ def executescript(cursor: CursorType, script: str) -> None: This is not provided by DBAPI2, and so needs engine-specific support. - Some database engines may automatically COMMIT the ongoing transaction both - before and after executing the script. + Any ongoing transaction is committed before executing the script in its own + transaction. The script transaction is left open and it is the responsibility of + the caller to commit it. """ ... 
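As an aside, the behaviour relied on here is easy to demonstrate with the standard-library `sqlite3` module alone (this is an illustration, not Synapse code): a script that begins a transaction but never commits it leaves that transaction pending, so the caller can still commit or roll it back.

```python
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE foo (name TEXT PRIMARY KEY)")

# executescript() commits any pending transaction first, then runs the script
# with no further implicit transaction control. The script below never issues
# COMMIT, so its transaction is still open afterwards...
cur.executescript("BEGIN TRANSACTION; INSERT INTO foo (name) VALUES ('a');")
assert conn.in_transaction

# ...which means the caller decides its fate; rolling back discards the insert.
conn.rollback()
print(cur.execute("SELECT count(*) FROM foo").fetchone())  # (0,)
```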
diff --git a/synapse/storage/engines/postgres.py b/synapse/storage/engines/postgres.py index f9f562ea4544..b350f57ccb4a 100644 --- a/synapse/storage/engines/postgres.py +++ b/synapse/storage/engines/postgres.py @@ -220,5 +220,9 @@ def executescript(cursor: psycopg2.extensions.cursor, script: str) -> None: """Execute a chunk of SQL containing multiple semicolon-delimited statements. Psycopg2 seems happy to do this in DBAPI2's `execute()` function. + + For consistency with SQLite, any ongoing transaction is committed before + executing the script in its own transaction. The script transaction is + left open and it is the responsibility of the caller to commit it. """ - cursor.execute(script) + cursor.execute(f"COMMIT; BEGIN TRANSACTION; {script}") diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 2f7df85ce4fd..28751e89a5a5 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -135,14 +135,16 @@ def executescript(cursor: sqlite3.Cursor, script: str) -> None: > than one statement with it, it will raise a Warning. Use executescript() if > you want to execute multiple SQL statements with one call. - The script is wrapped in transaction control statemnets, since the docs for + The script is prefixed with a `BEGIN TRANSACTION`, since the docs for `executescript` warn: > If there is a pending transaction, an implicit COMMIT statement is executed > first. No other implicit transaction control is performed; any transaction > control must be added to sql_script. """ - cursor.executescript(f"BEGIN TRANSACTION;\n{script}\nCOMMIT;") + # The implementation of `executescript` can be found at + # https://github.com/python/cpython/blob/3.11/Modules/_sqlite/cursor.c#L1035. + cursor.executescript(f"BEGIN TRANSACTION; {script}") # Following functions taken from: https://github.com/coleifer/peewee diff --git a/tests/storage/test_database.py b/tests/storage/test_database.py index 543cce6b3e12..8cd7c89ca2f8 100644 --- a/tests/storage/test_database.py +++ b/tests/storage/test_database.py @@ -22,6 +22,7 @@ from synapse.server import HomeServer from synapse.storage.database import ( DatabasePool, + LoggingDatabaseConnection, LoggingTransaction, make_tuple_comparison_clause, ) @@ -37,6 +38,101 @@ def test_native_tuple_comparison(self) -> None: self.assertEqual(args, [1, 2]) +class ExecuteScriptTestCase(unittest.HomeserverTestCase): + """Tests for `BaseDatabaseEngine.executescript` implementations.""" + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.db_pool: DatabasePool = self.store.db_pool + self.get_success( + self.db_pool.runInteraction( + "create", + lambda txn: txn.execute("CREATE TABLE foo (name TEXT PRIMARY KEY)"), + ) + ) + + def test_transaction(self) -> None: + """Test that all statements are run in a single transaction.""" + + def run(conn: LoggingDatabaseConnection) -> None: + cur = conn.cursor(txn_name="test_transaction") + self.db_pool.engine.executescript( + cur, + ";".join( + [ + "INSERT INTO foo (name) VALUES ('transaction test')", + # This next statement will fail. When `executescript` is not + # transactional, the previous row will be observed later. 
+ "INSERT INTO foo (name) VALUES ('transaction test')", + ] + ), + ) + + self.get_failure( + self.db_pool.runWithConnection(run), + self.db_pool.engine.module.IntegrityError, + ) + + self.assertIsNone( + self.get_success( + self.db_pool.simple_select_one_onecol( + "foo", + keyvalues={"name": "transaction test"}, + retcol="name", + allow_none=True, + ) + ), + "executescript is not running statements inside a transaction", + ) + + def test_commit(self) -> None: + """Test that the script transaction remains open and can be committed.""" + + def run(conn: LoggingDatabaseConnection) -> None: + cur = conn.cursor(txn_name="test_commit") + self.db_pool.engine.executescript( + cur, "INSERT INTO foo (name) VALUES ('commit test')" + ) + cur.execute("COMMIT") + + self.get_success(self.db_pool.runWithConnection(run)) + + self.assertIsNotNone( + self.get_success( + self.db_pool.simple_select_one_onecol( + "foo", + keyvalues={"name": "commit test"}, + retcol="name", + allow_none=True, + ) + ), + ) + + def test_rollback(self) -> None: + """Test that the script transaction remains open and can be rolled back.""" + + def run(conn: LoggingDatabaseConnection) -> None: + cur = conn.cursor(txn_name="test_rollback") + self.db_pool.engine.executescript( + cur, "INSERT INTO foo (name) VALUES ('rollback test')" + ) + cur.execute("ROLLBACK") + + self.get_success(self.db_pool.runWithConnection(run)) + + self.assertIsNone( + self.get_success( + self.db_pool.simple_select_one_onecol( + "foo", + keyvalues={"name": "rollback test"}, + retcol="name", + allow_none=True, + ) + ), + "executescript is not leaving the script transaction open", + ) + + class CallbacksTestCase(unittest.HomeserverTestCase): """Tests for transaction callbacks.""" From 805b641fb6b31e677278eaf6e27875eba5c2a3d3 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 31 Jan 2023 11:31:52 +0000 Subject: [PATCH 093/344] Fix "Re-starting finished log context" spam when creating events (#14947) `run_in_background` calls re-use the current logging context. When they are not awaited, they can complete after the current logging context has been marked as finished, which leads to log spam. Use `run_as_background_process` instead. Fixes one of the instances of #13090. Signed-off-by: Sean Quah --- changelog.d/14947.bugfix | 1 + synapse/handlers/message.py | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14947.bugfix diff --git a/changelog.d/14947.bugfix b/changelog.d/14947.bugfix new file mode 100644 index 000000000000..b9e768c44c83 --- /dev/null +++ b/changelog.d/14947.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 6290f7f52316..e688e00575f3 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1939,7 +1939,9 @@ async def persist_and_notify_client_events( if event.type == EventTypes.Message: # We don't want to block sending messages on any presence code. This # matters as sometimes presence code can take a while. 
- run_in_background(self._bump_active_time, requester.user) + run_as_background_process( + "bump_presence_active_time", self._bump_active_time, requester.user + ) async def _notify() -> None: try: From 3b8574b4f250bac1e4d4cfbf6b1ceec83bc0bac2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 31 Jan 2023 12:43:20 +0000 Subject: [PATCH 094/344] Tag /send_join responses to detect faster joins (#14950) * Tag /send_join responses to detect faster joins * Changelog * Define a proper SynapseTag * isort --- changelog.d/14950.misc | 1 + synapse/federation/federation_server.py | 6 ++++++ synapse/logging/opentracing.py | 5 +++++ 3 files changed, 12 insertions(+) create mode 100644 changelog.d/14950.misc diff --git a/changelog.d/14950.misc b/changelog.d/14950.misc new file mode 100644 index 000000000000..6602776b3ffd --- /dev/null +++ b/changelog.d/14950.misc @@ -0,0 +1 @@ +Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 3197939a363d..c9a6dfd1a4bf 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -62,7 +62,9 @@ run_in_background, ) from synapse.logging.opentracing import ( + SynapseTags, log_kv, + set_tag, start_active_span_from_edu, tag_args, trace, @@ -678,6 +680,10 @@ async def on_send_join_request( room_id: str, caller_supports_partial_state: bool = False, ) -> Dict[str, Any]: + set_tag( + SynapseTags.SEND_JOIN_RESPONSE_IS_PARTIAL_STATE, + caller_supports_partial_state, + ) await self._room_member_handler._join_rate_per_room_limiter.ratelimit( # type: ignore[has-type] requester=None, key=room_id, diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index a705af83565d..8ef9a0dda8e5 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -322,6 +322,11 @@ class SynapseTags: # The name of the external cache CACHE_NAME = "cache.name" + # Boolean. Present on /v2/send_join requests, omitted from all others. + # True iff partial state was requested and we provided (or intended to provide) + # partial state in the response. + SEND_JOIN_RESPONSE_IS_PARTIAL_STATE = "send_join.partial_state_response" + # Used to tag function arguments # # Tag a named arg. The name of the argument should be appended to this prefix. From 585180594b19920f0c4bc53599053c42c40d6511 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 31 Jan 2023 08:00:07 -0500 Subject: [PATCH 095/344] Fix running cargo bench & test in CI. (#14943) --- .github/workflows/tests.yml | 25 +++++++++++++++++++++++++ changelog.d/14943.feature | 1 + rust/benches/evaluator.rs | 10 ++++++++++ 3 files changed, 36 insertions(+) create mode 100644 changelog.d/14943.feature diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 28fc6d45e613..f184727cedb5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -566,6 +566,29 @@ jobs: - run: cargo test + # We want to ensure that the cargo benchmarks still compile, which requires a + # nightly compiler. + cargo-bench: + if: ${{ needs.changes.outputs.rust == 'true' }} + runs-on: ubuntu-latest + needs: + - linting-done + - changes + + steps: + - uses: actions/checkout@v3 + + - name: Install Rust + # There don't seem to be versioned releases of this action per se: for each rust + # version there is a branch which gets constantly rebased on top of master. + # We pin to a specific commit for paranoia's sake. 
+ uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + with: + toolchain: nightly-2022-12-01 + - uses: Swatinem/rust-cache@v2 + + - run: cargo bench --no-run + # a job which marks all the other jobs as complete, thus allowing PRs to be merged. tests-done: if: ${{ always() }} @@ -577,6 +600,7 @@ jobs: - portdb - complement - cargo-test + - cargo-bench runs-on: ubuntu-latest steps: - uses: matrix-org/done-action@v2 @@ -588,3 +612,4 @@ jobs: skippable: | lint-newsfile cargo-test + cargo-bench diff --git a/changelog.d/14943.feature b/changelog.d/14943.feature new file mode 100644 index 000000000000..8293e99effbc --- /dev/null +++ b/changelog.d/14943.feature @@ -0,0 +1 @@ +Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 8c28bb0af3dc..6b16a3f75b75 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -13,6 +13,7 @@ // limitations under the License. #![feature(test)] +use std::collections::BTreeSet; use synapse::push::{ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules, }; @@ -32,6 +33,8 @@ fn bench_match_exact(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + BTreeSet::new(), + false, 10, Some(0), Default::default(), @@ -68,6 +71,8 @@ fn bench_match_word(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + BTreeSet::new(), + false, 10, Some(0), Default::default(), @@ -104,6 +109,8 @@ fn bench_match_word_miss(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + BTreeSet::new(), + false, 10, Some(0), Default::default(), @@ -140,6 +147,8 @@ fn bench_eval_message(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + BTreeSet::new(), + false, 10, Some(0), Default::default(), @@ -156,6 +165,7 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, + false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); From 9cb25b20e5403c3d3d03fe5bc028350355a97bd1 Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Tue, 31 Jan 2023 08:23:07 -0800 Subject: [PATCH 096/344] 1.76.0 --- CHANGES.md | 9 +++++++++ changelog.d/14677.doc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/14677.doc diff --git a/CHANGES.md b/CHANGES.md index 10be47158ec6..e8a248ae398e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.76.0 (2023-01-31) +=========================== + +Improved Documentation +---------------------- + +- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677)) + + Synapse 1.76.0rc2 (2023-01-27) ============================== diff --git a/changelog.d/14677.doc b/changelog.d/14677.doc deleted file mode 100644 index 7c275c77a896..000000000000 --- a/changelog.d/14677.doc +++ /dev/null @@ -1 +0,0 @@ -Describe the ideas and the internal machinery behind faster joins. diff --git a/debian/changelog b/debian/changelog index 26234491f357..c678ae6c5c7e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.76.0) stable; urgency=medium + + * New Synapse release 1.76.0. + + -- Synapse Packaging team Tue, 31 Jan 2023 08:21:47 -0800 + matrix-synapse-py3 (1.76.0~rc2) stable; urgency=medium * New Synapse release 1.76.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index fd05a2b55004..8f7ced99a2ca 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.76.0rc2" +version = "1.76.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From e4bf5f3b05650dedbd1e638478fcb2dabfc5ab61 Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Tue, 31 Jan 2023 08:28:16 -0800 Subject: [PATCH 097/344] update changelog --- CHANGES.md | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index e8a248ae398e..cba7fbc651a5 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,15 +1,6 @@ Synapse 1.76.0 (2023-01-31) =========================== -Improved Documentation ----------------------- - -- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677)) - - -Synapse 1.76.0rc2 (2023-01-27) -============================== - The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/upgrade.md#faster-joins-are-enabled-by-default) for more details. The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams). @@ -34,6 +25,16 @@ Synapse has to spend more effort to complete the join in the background. Once th - retrieve room history from before your join, if permitted by the room settings; and - access the full list of room members. + +Improved Documentation +---------------------- + +- Describe the ideas and the internal machinery behind faster joins. ([\#14677](https://github.com/matrix-org/synapse/issues/14677)) + + +Synapse 1.76.0rc2 (2023-01-27) +============================== + Bugfixes -------- From eafdb12dd8db985fbe1ac27ca75d28af8d4e4c5d Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Tue, 31 Jan 2023 08:35:22 -0800 Subject: [PATCH 098/344] update changelog and upgrade notes --- CHANGES.md | 2 +- docs/upgrade.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index cba7fbc651a5..5f0c50851f23 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,7 +1,7 @@ Synapse 1.76.0 (2023-01-31) =========================== -The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/upgrade.md#faster-joins-are-enabled-by-default) for more details. +The 1.76 release is the first to enable faster joins ([MSC3706](https://github.com/matrix-org/matrix-spec-proposals/pull/3706) and [MSC3902](https://github.com/matrix-org/matrix-spec-proposals/pull/3902)) by default. Admins can opt-out: see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#faster-joins-are-enabled-by-default) for more details. 
The upgrade from 1.75 to 1.76 changes the account data replication streams in a backwards-incompatible manner. Server operators running a multi-worker deployment should consult [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.76/docs/upgrade.md#changes-to-the-account-data-replication-streams). diff --git a/docs/upgrade.md b/docs/upgrade.md index 6316db563b1a..bc143444bed6 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -92,7 +92,7 @@ process, for example: ## Faster joins are enabled by default -When joining a room for the first time, Synapse 1.76.0rc1 will request a partial join from the other server by default. Previously, server admins had to opt-in to this using an experimental config flag. +When joining a room for the first time, Synapse 1.76.0 will request a partial join from the other server by default. Previously, server admins had to opt-in to this using an experimental config flag. Server admins can opt out of this feature for the time being by setting From 73403d5e5e7740e96834a364f81ad8e00e3f2f0e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Feb 2023 06:24:02 -0500 Subject: [PATCH 099/344] Fix inconsistencies between MSC3952 and implementation. (#14957) * Correct the push rule IDs. * Removes the sound tweak for room notifications. --- changelog.d/14957.feature | 1 + rust/src/push/base_rules.rs | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14957.feature diff --git a/changelog.d/14957.feature b/changelog.d/14957.feature new file mode 100644 index 000000000000..8293e99effbc --- /dev/null +++ b/changelog.d/14957.feature @@ -0,0 +1 @@ +Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 880eed0ef405..49add4e9519a 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -132,7 +132,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mentioned"), + rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"), priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::IsUserMention)]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), @@ -148,7 +148,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ default_enabled: true, }, PushRule { - rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mentioned"), + rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"), priority_class: 5, conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::IsRoomMention), @@ -156,7 +156,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ key: Cow::Borrowed("room"), }), ]), - actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]), default: true, default_enabled: true, }, From 1958f9de45acee2792fd8219462cea9162624464 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 1 Feb 2023 12:36:04 +0000 Subject: [PATCH 100/344] lnav config for synpase logs (#14953) --- changelog.d/14953.misc | 1 + contrib/lnav/README.md | 47 +++++++++++++++++++ contrib/lnav/synapse-log-format.json | 67 ++++++++++++++++++++++++++++ 3 files changed, 115 insertions(+) create mode 100644 changelog.d/14953.misc create mode 100644 contrib/lnav/README.md create mode 100644 contrib/lnav/synapse-log-format.json diff --git 
a/changelog.d/14953.misc b/changelog.d/14953.misc new file mode 100644 index 000000000000..1741e1627caa --- /dev/null +++ b/changelog.d/14953.misc @@ -0,0 +1 @@ +Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. diff --git a/contrib/lnav/README.md b/contrib/lnav/README.md new file mode 100644 index 000000000000..5230a191d28e --- /dev/null +++ b/contrib/lnav/README.md @@ -0,0 +1,47 @@ +# `lnav` config for Synapse logs + +[lnav](https://lnav.org/) is a log-viewing tool. It is particularly useful when +you need to interleave multiple log files, or for exploring a large log file +with regex filters. The downside is that it is not as ubiquitous as tools like +`less`, `grep`, etc. + +This directory contains an `lnav` [log format definition]( + https://docs.lnav.org/en/v0.10.1/formats.html#defining-a-new-format +) for Synapse logs as +emitted by Synapse with the default [logging configuration]( + https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#log_config +). It supports lnav 0.10.1 because that's what's packaged by my distribution. + +This should allow lnav: + +- to interpret timestamps, allowing log interleaving; +- to interpret log severity levels, allowing colouring by log level(!!!); +- to interpret request IDs, allowing you to skip through a specific request; and +- to highlight room, event and user IDs in logs. + +See also https://gist.github.com/benje/e2ab750b0a81d11920d83af637d289f7 for a + similar example. + +## Example + +[![asciicast](https://asciinema.org/a/556133.svg)](https://asciinema.org/a/556133) + +## Tips + +- `lnav -i /path/to/synapse/checkout/contrib/lnav/synapse-log-format.json` +- `lnav my_synapse_log_file` or `lnav synapse_log_files.*`, etc. +- `lnav --help` for CLI help. + +Within lnav itself: + +- `?` for help within lnav itself. +- `q` to quit. +- `/` to search a-la `less` and `vim`, then `n` and `N` to continue searching + down and up. +- Use `o` and `O` to skip through logs based on the request ID (`POST-1234`, or + else the value of the [`request_id_header`]( + https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html?highlight=request_id_header#listeners + ) header). This may get confused if the same request ID is repeated among + multiple files or process restarts. +- ??? 
+- Profit diff --git a/contrib/lnav/synapse-log-format.json b/contrib/lnav/synapse-log-format.json new file mode 100644 index 000000000000..ad7017ee5ec2 --- /dev/null +++ b/contrib/lnav/synapse-log-format.json @@ -0,0 +1,67 @@ +{ + "$schema": "https://lnav.org/schemas/format-v1.schema.json", + "synapse": { + "title": "Synapse logs", + "description": "Logs output by Synapse, a Matrix homesever, under its default logging config.", + "regex": { + "log": { + "pattern": ".*(?\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2},\\d{3}) - (?.+) - (?\\d+) - (?\\w+) - (?.+) - (?.*)" + } + }, + "json": false, + "timestamp-field": "timestamp", + "timestamp-format": [ + "%Y-%m-%d %H:%M:%S,%L" + ], + "level-field": "level", + "body-field": "body", + "opid-field": "context", + "level": { + "critical": "CRITICAL", + "error": "ERROR", + "warning": "WARNING", + "info": "INFO", + "debug": "DEBUG" + }, + "sample": [ + { + "line": "my-matrix-server-generic-worker-4 | 2023-01-27 09:47:09,818 - synapse.replication.tcp.client - 381 - ERROR - PUT-32992 - Timed out waiting for stream receipts", + "level": "error" + }, + { + "line": "my-matrix-server-federation-sender-1 | 2023-01-25 20:56:20,995 - synapse.http.matrixfederationclient - 709 - WARNING - federation_transaction_transmission_loop-3 - {PUT-O-3} [example.com] Request failed: PUT matrix://example.com/_matrix/federation/v1/send/1674680155797: HttpResponseException('403: Forbidden')", + "level": "warning" + }, + { + "line": "my-matrix-server | 2023-01-25 20:55:54,433 - synapse.storage.databases - 66 - INFO - main - [database config 'master']: Checking database server", + "level": "info" + }, + { + "line": "my-matrix-server | 2023-01-26 15:08:40,447 - synapse.access.http.8008 - 460 - INFO - PUT-74929 - 0.0.0.0 - 8008 - {@alice:example.com} Processed request: 0.011sec/0.000sec (0.000sec, 0.000sec) (0.001sec/0.008sec/3) 2B 200 \"PUT /_matrix/client/r0/user/%40alice%3Atexample.com/account_data/im.vector.setting.breadcrumbs HTTP/1.0\" \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Element/1.11.20 Chrome/108.0.5359.179 Electron/22.0.3 Safari/537.36\" [0 dbevts]", + "level": "info" + } + ], + "highlights": { + "user_id": { + "pattern": "(@|%40)[^:% ]+(:|%3A)[\\[\\]0-9a-zA-Z.\\-:]+(:\\d{1,5})?(? Date: Wed, 1 Feb 2023 16:45:19 +0100 Subject: [PATCH 101/344] Add more user information to export-data command. (#14894) * The user's profile information. * The user's devices. * The user's connections / IP address information. 
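For illustration, a minimal sketch of the shape of the three new writer hooks added here. The class name and the `_append_json_lines` helper below are hypothetical; the real `FileExfiltrationWriter` in `synapse/app/admin_cmd.py` appends the same JSON-lines layout under a `user_data/` subdirectory of the output directory:

```python
import json
import os
from typing import List


class UserDataWriter:
    # Hypothetical minimal writer mirroring the three new ExfiltrationWriter hooks.
    def __init__(self, base_directory: str) -> None:
        self.base_directory = base_directory

    def _append_json_lines(self, filename: str, items: List[dict]) -> None:
        # One JSON object per line, appended under <output>/user_data/<filename>.
        user_directory = os.path.join(self.base_directory, "user_data")
        os.makedirs(user_directory, exist_ok=True)
        with open(os.path.join(user_directory, filename), "a") as f:
            for item in items:
                print(json.dumps(item), file=f)

    def write_profile(self, profile: dict) -> None:
        self._append_json_lines("profile", [profile])

    def write_devices(self, devices: List[dict]) -> None:
        self._append_json_lines("devices", devices)

    def write_connections(self, connections: List[dict]) -> None:
        self._append_json_lines("connections", connections)
```

With this layout, `profile` ends up holding a single JSON object, while `devices` and `connections` hold one JSON object per line.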
--- .ci/scripts/test_export_data_command.sh | 10 ++-- changelog.d/14894.feature | 1 + docs/usage/administration/admin_faq.md | 80 ++++++++++++++++++++----- synapse/app/admin_cmd.py | 32 +++++++++- synapse/handlers/admin.py | 43 +++++++++++++ tests/handlers/test_admin.py | 60 +++++++++++++++++++ 6 files changed, 206 insertions(+), 20 deletions(-) create mode 100644 changelog.d/14894.feature diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh index 9f6c49acff73..36f836345cae 100755 --- a/.ci/scripts/test_export_data_command.sh +++ b/.ci/scripts/test_export_data_command.sh @@ -23,8 +23,9 @@ poetry run python -m synapse.app.admin_cmd -c .ci/sqlite-config.yaml export-dat --output-directory /tmp/export_data # Test that the output directory exists and contains the rooms directory -dir="/tmp/export_data/rooms" -if [ -d "$dir" ]; then +dir_r="/tmp/export_data/rooms" +dir_u="/tmp/export_data/user_data" +if [ -d "$dir_r" ] && [ -d "$dir_u" ]; then echo "Command successful, this test passes" else echo "No output directories found, the command fails against a sqlite database." @@ -43,8 +44,9 @@ poetry run python -m synapse.app.admin_cmd -c .ci/postgres-config.yaml export-d --output-directory /tmp/export_data2 # Test that the output directory exists and contains the rooms directory -dir2="/tmp/export_data2/rooms" -if [ -d "$dir2" ]; then +dir_r2="/tmp/export_data2/rooms" +dir_u2="/tmp/export_data2/user_data" +if [ -d "$dir_r2" ] && [ -d "$dir_u2" ]; then echo "Command successful, this test passes" else echo "No output directories found, the command fails against a postgres database." diff --git a/changelog.d/14894.feature b/changelog.d/14894.feature new file mode 100644 index 000000000000..d22741d079af --- /dev/null +++ b/changelog.d/14894.feature @@ -0,0 +1 @@ +Adds profile information, devices and connections to the user data export via command line. \ No newline at end of file diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index 18ce6171dbba..7a2774119964 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -2,13 +2,19 @@ How do I become a server admin? --- -If your server already has an admin account you should use the [User Admin API](../../admin_api/user_admin_api.md#change-whether-a-user-is-a-server-administrator-or-not) to promote other accounts to become admins. +If your server already has an admin account you should use the +[User Admin API](../../admin_api/user_admin_api.md#change-whether-a-user-is-a-server-administrator-or-not) +to promote other accounts to become admins. -If you don't have any admin accounts yet you won't be able to use the admin API, so you'll have to edit the database manually. Manually editing the database is generally not recommended so once you have an admin account: use the admin APIs to make further changes. +If you don't have any admin accounts yet you won't be able to use the admin API, +so you'll have to edit the database manually. Manually editing the database is +generally not recommended so once you have an admin account: use the admin APIs +to make further changes. ```sql UPDATE users SET admin = 1 WHERE name = '@foo:bar.com'; ``` + What servers are my server talking to? --- Run this sql query on your db: @@ -36,8 +42,38 @@ How can I export user data? --- Synapse includes a Python command to export data for a specific user. 
It takes the homeserver configuration file and the full Matrix ID of the user to export: + ```console -python -m synapse.app.admin_cmd -c export-data +python -m synapse.app.admin_cmd -c export-data --output-directory +``` + +If you uses [Poetry](../../development/dependencies.md#managing-dependencies-with-poetry) +to run Synapse: + +```console +poetry run python -m synapse.app.admin_cmd -c export-data --output-directory +``` + +The directory to store the export data in can be customised with the +`--output-directory` parameter; ensure that the provided directory is +empty. If this parameter is not provided, Synapse defaults to creating +a temporary directory (which starts with "synapse-exfiltrate") in `/tmp`, +`/var/tmp`, or `/usr/tmp`, in that order. + +The exported data has the following layout: + +``` +output-directory +├───rooms +│ └─── +│ ├───events +│ ├───state +│ ├───invite_state +│ └───knock_state +└───user_data + ├───connections + ├───devices + └───profile ``` Manually resetting passwords @@ -50,21 +86,29 @@ I have a problem with my server. Can I just delete my database and start again? --- Deleting your database is unlikely to make anything better. -It's easy to make the mistake of thinking that you can start again from a clean slate by dropping your database, but things don't work like that in a federated network: lots of other servers have information about your server. +It's easy to make the mistake of thinking that you can start again from a clean +slate by dropping your database, but things don't work like that in a federated +network: lots of other servers have information about your server. -For example: other servers might think that you are in a room, your server will think that you are not, and you'll probably be unable to interact with that room in a sensible way ever again. +For example: other servers might think that you are in a room, your server will +think that you are not, and you'll probably be unable to interact with that room +in a sensible way ever again. -In general, there are better solutions to any problem than dropping the database. Come and seek help in https://matrix.to/#/#synapse:matrix.org. +In general, there are better solutions to any problem than dropping the database. +Come and seek help in https://matrix.to/#/#synapse:matrix.org. There are two exceptions when it might be sensible to delete your database and start again: -* You have *never* joined any rooms which are federated with other servers. For instance, a local deployment which the outside world can't talk to. -* You are changing the `server_name` in the homeserver configuration. In effect this makes your server a completely new one from the point of view of the network, so in this case it makes sense to start with a clean database. +* You have *never* joined any rooms which are federated with other servers. For +instance, a local deployment which the outside world can't talk to. +* You are changing the `server_name` in the homeserver configuration. In effect +this makes your server a completely new one from the point of view of the network, +so in this case it makes sense to start with a clean database. (In both cases you probably also want to clear out the media_store.) I've stuffed up access to my room, how can I delete it to free up the alias? 
--- Using the following curl command: -``` +```console curl -H 'Authorization: Bearer ' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/ ``` `` - can be obtained in riot by looking in the riot settings, down the bottom is: @@ -75,19 +119,25 @@ Access Token:\ How can I find the lines corresponding to a given HTTP request in my homeserver log? --- -Synapse tags each log line according to the HTTP request it is processing. When it finishes processing each request, it logs a line containing the words `Processed request: `. For example: +Synapse tags each log line according to the HTTP request it is processing. When +it finishes processing each request, it logs a line containing the words +`Processed request: `. For example: ``` 2019-02-14 22:35:08,196 - synapse.access.http.8008 - 302 - INFO - GET-37 - ::1 - 8008 - {@richvdh:localhost} Processed request: 0.173sec/0.001sec (0.002sec, 0.000sec) (0.027sec/0.026sec/2) 687B 200 "GET /_matrix/client/r0/sync HTTP/1.1" "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36" [0 dbevts]" ``` -Here we can see that the request has been tagged with `GET-37`. (The tag depends on the method of the HTTP request, so might start with `GET-`, `PUT-`, `POST-`, `OPTIONS-` or `DELETE-`.) So to find all lines corresponding to this request, we can do: +Here we can see that the request has been tagged with `GET-37`. (The tag depends +on the method of the HTTP request, so might start with `GET-`, `PUT-`, `POST-`, +`OPTIONS-` or `DELETE-`.) So to find all lines corresponding to this request, we can do: -``` +```console grep 'GET-37' homeserver.log ``` -If you want to paste that output into a github issue or matrix room, please remember to surround it with triple-backticks (```) to make it legible (see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)). +If you want to paste that output into a github issue or matrix room, please +remember to surround it with triple-backticks (```) to make it legible +(see [quoting code](https://help.github.com/en/articles/basic-writing-and-formatting-syntax#quoting-code)). What do all those fields in the 'Processed' line mean? @@ -127,7 +177,7 @@ This is normally caused by a misconfiguration in your reverse-proxy. See [the re Help!! Synapse is slow and eats all my RAM/CPU! ------------------------------------------------ +--- First, ensure you are running the latest version of Synapse, using Python 3 with a [PostgreSQL database](../../postgres.md). 
@@ -169,7 +219,7 @@ in the Synapse config file: [see here](../configuration/config_documentation.md# Running out of File Handles ---------------------------- +--- If Synapse runs out of file handles, it typically fails badly - live-locking at 100% CPU, and/or failing to accept new TCP connections (blocking the diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 165d1c5db06b..fe7afb94755e 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -35,6 +35,7 @@ ApplicationServiceTransactionWorkerStore, ApplicationServiceWorkerStore, ) +from synapse.storage.databases.main.client_ips import ClientIpWorkerStore from synapse.storage.databases.main.deviceinbox import DeviceInboxWorkerStore from synapse.storage.databases.main.devices import DeviceWorkerStore from synapse.storage.databases.main.event_federation import EventFederationWorkerStore @@ -43,6 +44,7 @@ ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.filtering import FilteringWorkerStore +from synapse.storage.databases.main.profile import ProfileWorkerStore from synapse.storage.databases.main.push_rule import PushRulesWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.registration import RegistrationWorkerStore @@ -54,7 +56,7 @@ from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.storage.databases.main.tags import TagsWorkerStore from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore -from synapse.types import StateMap +from synapse.types import JsonDict, StateMap from synapse.util import SYNAPSE_VERSION from synapse.util.logcontext import LoggingContext @@ -63,6 +65,7 @@ class AdminCmdSlavedStore( FilteringWorkerStore, + ClientIpWorkerStore, DeviceWorkerStore, TagsWorkerStore, DeviceInboxWorkerStore, @@ -82,6 +85,7 @@ class AdminCmdSlavedStore( EventsWorkerStore, RegistrationWorkerStore, RoomWorkerStore, + ProfileWorkerStore, ): def __init__( self, @@ -192,6 +196,32 @@ def write_knock( for event in state.values(): print(json.dumps(event), file=f) + def write_profile(self, profile: JsonDict) -> None: + user_directory = os.path.join(self.base_directory, "user_data") + os.makedirs(user_directory, exist_ok=True) + profile_file = os.path.join(user_directory, "profile") + + with open(profile_file, "a") as f: + print(json.dumps(profile), file=f) + + def write_devices(self, devices: List[JsonDict]) -> None: + user_directory = os.path.join(self.base_directory, "user_data") + os.makedirs(user_directory, exist_ok=True) + device_file = os.path.join(user_directory, "devices") + + for device in devices: + with open(device_file, "a") as f: + print(json.dumps(device), file=f) + + def write_connections(self, connections: List[JsonDict]) -> None: + user_directory = os.path.join(self.base_directory, "user_data") + os.makedirs(user_directory, exist_ok=True) + connection_file = os.path.join(user_directory, "connections") + + for connection in connections: + with open(connection_file, "a") as f: + print(json.dumps(connection), file=f) + def finished(self) -> str: return self.base_directory diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index c81ea347583d..b03c214b145a 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -30,6 +30,7 @@ class AdminHandler: def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main + self._device_handler = hs.get_device_handler() 
self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state self._msc3866_enabled = hs.config.experimental.msc3866.enabled @@ -247,6 +248,21 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> ) writer.write_state(room_id, event_id, state) + # Get the user profile + profile = await self.get_user(UserID.from_string(user_id)) + if profile is not None: + writer.write_profile(profile) + + # Get all devices the user has + devices = await self._device_handler.get_devices_by_user(user_id) + writer.write_devices(devices) + + # Get all connections the user has + connections = await self.get_whois(UserID.from_string(user_id)) + writer.write_connections( + connections["devices"][""]["sessions"][0]["connections"] + ) + return writer.finished() @@ -297,6 +313,33 @@ def write_knock( """ raise NotImplementedError() + @abc.abstractmethod + def write_profile(self, profile: JsonDict) -> None: + """Write the profile of a user. + + Args: + profile: The user profile. + """ + raise NotImplementedError() + + @abc.abstractmethod + def write_devices(self, devices: List[JsonDict]) -> None: + """Write the devices of a user. + + Args: + devices: The list of devices. + """ + raise NotImplementedError() + + @abc.abstractmethod + def write_connections(self, connections: List[JsonDict]) -> None: + """Write the connections of a user. + + Args: + connections: The list of connections / sessions. + """ + raise NotImplementedError() + @abc.abstractmethod def finished(self) -> Any: """Called when all data has successfully been exported and written. diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index c1579dac610f..6f300b8e1119 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -38,6 +38,7 @@ class ExfiltrateData(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_handler = hs.get_admin_handler() + self._store = hs.get_datastores().main self.user1 = self.register_user("user1", "password") self.token1 = self.login("user1", "password") @@ -236,3 +237,62 @@ def test_knock(self) -> None: self.assertEqual(args[0], room_id) self.assertEqual(args[1].content["membership"], "knock") self.assertTrue(args[2]) # Assert there is at least one bit of state + + def test_profile(self) -> None: + """Tests that user profile get exported.""" + writer = Mock() + + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) + + writer.write_events.assert_not_called() + writer.write_profile.assert_called_once() + + # check only a few values, not all available + args = writer.write_profile.call_args[0] + self.assertEqual(args[0]["name"], self.user2) + self.assertIn("displayname", args[0]) + self.assertIn("avatar_url", args[0]) + self.assertIn("threepids", args[0]) + self.assertIn("external_ids", args[0]) + self.assertIn("creation_ts", args[0]) + + def test_devices(self) -> None: + """Tests that user devices get exported.""" + writer = Mock() + + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) + + writer.write_events.assert_not_called() + writer.write_devices.assert_called_once() + + args = writer.write_devices.call_args[0] + self.assertEqual(len(args[0]), 1) + self.assertEqual(args[0][0]["user_id"], self.user2) + self.assertIn("device_id", args[0][0]) + self.assertIsNone(args[0][0]["display_name"]) + self.assertIsNone(args[0][0]["last_seen_user_agent"]) + 
self.assertIsNone(args[0][0]["last_seen_ts"]) + self.assertIsNone(args[0][0]["last_seen_ip"]) + + def test_connections(self) -> None: + """Tests that user sessions / connections get exported.""" + # Insert a user IP + self.get_success( + self._store.insert_client_ip( + self.user2, "access_token", "ip", "user_agent", "MY_DEVICE" + ) + ) + + writer = Mock() + + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) + + writer.write_events.assert_not_called() + writer.write_connections.assert_called_once() + + args = writer.write_connections.call_args[0] + self.assertEqual(len(args[0]), 1) + self.assertEqual(args[0][0]["ip"], "ip") + self.assertEqual(args[0][0]["user_agent"], "user_agent") + self.assertGreater(args[0][0]["last_seen"], 0) + self.assertNotIn("access_token", args[0][0]) From bb675913f005758aaabd756dfd1e0b2f347bb8d6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Feb 2023 20:06:28 +0000 Subject: [PATCH 102/344] Bump docker/build-push-action from 3 to 4 (#14952) * Bump docker/build-push-action from 3 to 4 Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 3 to 4. - [Release notes](https://github.com/docker/build-push-action/releases) - [Commits](https://github.com/docker/build-push-action/compare/v3...v4) --- updated-dependencies: - dependency-name: docker/build-push-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docker.yml | 2 +- changelog.d/14952.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14952.misc diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 49427ab50d0f..4bbe5decf88e 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -48,7 +48,7 @@ jobs: type=pep440,pattern={{raw}} - name: Build and push all platforms - uses: docker/build-push-action@v3 + uses: docker/build-push-action@v4 with: push: true labels: "gitsha1=${{ github.sha }}" diff --git a/changelog.d/14952.misc b/changelog.d/14952.misc new file mode 100644 index 000000000000..4add8446d250 --- /dev/null +++ b/changelog.d/14952.misc @@ -0,0 +1 @@ +Bump docker/build-push-action from 3 to 4. From 230a831c734246aa4db7bd842947c7ea277ca126 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Feb 2023 15:45:10 -0500 Subject: [PATCH 103/344] Attempt to delete more duplicate rows in receipts_linearized table. (#14915) The previous assumption was that the stream_id column was unique (for a room ID, receipt type, user ID tuple), but this turned out to be incorrect. Now find the max stream ID, then map this back to a database-specific row identifier and delete other rows which match the (room ID, receipt type, user ID) tuple, but *not* the row ID. 
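A self-contained sketch of that strategy, run against an in-memory SQLite database. SQLite exposes the implicit `rowid`; on PostgreSQL the patch uses `ctid` instead. The table is cut down to the relevant columns and the sample rows are invented:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE receipts_linearized "
    "(room_id TEXT, receipt_type TEXT, user_id TEXT, thread_id TEXT, "
    "stream_id INTEGER, event_id TEXT)"
)
# Two rows sharing the same stream_id: the case the old "stream_id < max" delete missed.
conn.executemany(
    "INSERT INTO receipts_linearized VALUES (?, ?, ?, NULL, ?, ?)",
    [
        ("!room:test", "m.read", "@alice:test", 7, "$event"),
        ("!room:test", "m.read", "@alice:test", 7, "$event"),
    ],
)

# Find (room_id, receipt_type, user_id) keys with duplicates and their max stream_id.
duplicate_keys = conn.execute(
    "SELECT MAX(stream_id), room_id, receipt_type, user_id FROM receipts_linearized "
    "WHERE thread_id IS NULL "
    "GROUP BY room_id, receipt_type, user_id HAVING COUNT(*) > 1"
).fetchall()

for stream_id, room_id, receipt_type, user_id in duplicate_keys:
    # Pin one surviving row via its row identifier...
    (row_id,) = conn.execute(
        "SELECT rowid FROM receipts_linearized "
        "WHERE room_id = ? AND receipt_type = ? AND user_id = ? "
        "AND thread_id IS NULL AND stream_id = ? LIMIT 1",
        (room_id, receipt_type, user_id, stream_id),
    ).fetchone()
    # ...then delete every other row for that key, including rows with the same stream_id.
    conn.execute(
        "DELETE FROM receipts_linearized "
        "WHERE room_id = ? AND receipt_type = ? AND user_id = ? "
        "AND thread_id IS NULL AND rowid != ?",
        (room_id, receipt_type, user_id, row_id),
    )

assert conn.execute("SELECT COUNT(*) FROM receipts_linearized").fetchone()[0] == 1
```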
--- changelog.d/14915.bugfix | 1 + synapse/storage/databases/main/receipts.py | 34 ++++++++++++++----- tests/storage/databases/main/test_receipts.py | 4 ++- 3 files changed, 30 insertions(+), 9 deletions(-) create mode 100644 changelog.d/14915.bugfix diff --git a/changelog.d/14915.bugfix b/changelog.d/14915.bugfix new file mode 100644 index 000000000000..4969e5450c3f --- /dev/null +++ b/changelog.d/14915.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 3468f354e60f..29972d520413 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -941,10 +941,14 @@ async def _background_receipts_linearized_unique_index( receipts.""" def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: + if isinstance(self.database_engine, PostgresEngine): + ROW_ID_NAME = "ctid" + else: + ROW_ID_NAME = "rowid" + # Identify any duplicate receipts arising from # https://github.com/matrix-org/synapse/issues/14406. - # We expect the following query to use the per-thread receipt index and take - # less than a minute. + # The following query takes less than a minute on matrix.org. sql = """ SELECT MAX(stream_id), room_id, receipt_type, user_id FROM receipts_linearized @@ -956,19 +960,33 @@ def _remote_duplicate_receipts_txn(txn: LoggingTransaction) -> None: duplicate_keys = cast(List[Tuple[int, str, str, str]], list(txn)) # Then remove duplicate receipts, keeping the one with the highest - # `stream_id`. There should only be a single receipt with any given - # `stream_id`. - for max_stream_id, room_id, receipt_type, user_id in duplicate_keys: - sql = """ + # `stream_id`. Since there might be duplicate rows with the same + # `stream_id`, we delete by the ctid instead. + for stream_id, room_id, receipt_type, user_id in duplicate_keys: + sql = f""" + SELECT {ROW_ID_NAME} + FROM receipts_linearized + WHERE + room_id = ? AND + receipt_type = ? AND + user_id = ? AND + thread_id IS NULL AND + stream_id = ? + LIMIT 1 + """ + txn.execute(sql, (room_id, receipt_type, user_id, stream_id)) + row_id = cast(Tuple[str], txn.fetchone())[0] + + sql = f""" DELETE FROM receipts_linearized WHERE room_id = ? AND receipt_type = ? AND user_id = ? AND thread_id IS NULL AND - stream_id < ? + {ROW_ID_NAME} != ? """ - txn.execute(sql, (room_id, receipt_type, user_id, max_stream_id)) + txn.execute(sql, (room_id, receipt_type, user_id, row_id)) await self.db_pool.runInteraction( self.RECEIPTS_LINEARIZED_UNIQUE_INDEX_UPDATE_NAME, diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index 68026e283046..ac77aec003b1 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -168,7 +168,9 @@ def test_background_receipts_linearized_unique_index(self) -> None: {"stream_id": 6, "event_id": "$some_event"}, ], (self.other_room_id, "m.read", self.user_id): [ - {"stream_id": 7, "event_id": "$some_event"} + # It is possible for stream IDs to be duplicated. 
+ {"stream_id": 7, "event_id": "$some_event"}, + {"stream_id": 7, "event_id": "$some_event"}, ], }, expected_unique_receipts={ From 1182ae50635db94d3c9c47990a0befcbf6306b62 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Feb 2023 16:35:24 -0500 Subject: [PATCH 104/344] Add helper to parse an enum from query args & use it. (#14956) The `parse_enum` helper pulls an enum value from the query string (by delegating down to the parse_string helper with values generated from the enum). This is used to pull out "f" and "b" in most places and then we thread the resulting Direction enum throughout more code. --- changelog.d/14956.misc | 1 + synapse/federation/federation_client.py | 15 ++-- synapse/federation/federation_server.py | 12 +++- synapse/federation/transport/client.py | 8 +-- .../federation/transport/server/federation.py | 7 +- synapse/handlers/account_data.py | 2 +- synapse/handlers/receipts.py | 2 +- synapse/handlers/room.py | 9 +-- synapse/http/servlet.py | 70 +++++++++++++++++++ synapse/rest/admin/event_reports.py | 12 +--- synapse/rest/admin/federation.py | 7 +- synapse/rest/admin/media.py | 21 ++++-- synapse/rest/admin/rooms.py | 16 ++--- synapse/rest/admin/statistics.py | 11 +-- synapse/rest/admin/users.py | 5 +- synapse/rest/client/relations.py | 3 +- synapse/rest/client/room.py | 5 +- synapse/storage/databases/main/__init__.py | 5 +- .../storage/databases/main/events_worker.py | 11 ++- .../databases/main/media_repository.py | 5 +- synapse/storage/databases/main/room.py | 9 +-- synapse/storage/databases/main/stats.py | 6 +- .../storage/databases/main/transactions.py | 13 ++-- synapse/streams/config.py | 12 +--- tests/rest/admin/test_event_reports.py | 5 +- 25 files changed, 176 insertions(+), 96 deletions(-) create mode 100644 changelog.d/14956.misc diff --git a/changelog.d/14956.misc b/changelog.d/14956.misc new file mode 100644 index 000000000000..9f5384e60e78 --- /dev/null +++ b/changelog.d/14956.misc @@ -0,0 +1 @@ +Add missing type hints. \ No newline at end of file diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index feb32e40e52f..8493ffc2e5ee 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -37,7 +37,7 @@ import attr from prometheus_client import Counter -from synapse.api.constants import EventContentFields, EventTypes, Membership +from synapse.api.constants import Direction, EventContentFields, EventTypes, Membership from synapse.api.errors import ( CodeMessageException, Codes, @@ -1680,7 +1680,12 @@ async def send_request( return result async def timestamp_to_event( - self, *, destinations: List[str], room_id: str, timestamp: int, direction: str + self, + *, + destinations: List[str], + room_id: str, + timestamp: int, + direction: Direction, ) -> Optional["TimestampToEventResponse"]: """ Calls each remote federating server from `destinations` asking for their closest @@ -1693,7 +1698,7 @@ async def timestamp_to_event( room_id: Room to fetch the event from timestamp: The point in time (inclusive) we should navigate from in the given direction to find the closest event. - direction: ["f"|"b"] to indicate whether we should navigate forward + direction: indicates whether we should navigate forward or backward from the given timestamp to find the closest event. 
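A standalone sketch of the pattern the new helper captures, with a plain dict standing in for the Twisted request. The `Direction` enum below mirrors `synapse.api.constants.Direction`, and `parse_enum_from_args` is a simplified, hypothetical stand-in for the real servlet helper:

```python
import enum
from typing import Mapping, Optional, Type, TypeVar


class Direction(enum.Enum):
    FORWARDS = "f"
    BACKWARDS = "b"


EnumT = TypeVar("EnumT", bound=enum.Enum)


def parse_enum_from_args(
    args: Mapping[str, str], name: str, E: Type[EnumT], default: EnumT
) -> EnumT:
    # Validate the raw query value against the enum's string values, then
    # hand back the enum member rather than the bare string.
    raw: Optional[str] = args.get(name)
    if raw is None:
        return default
    allowed = [e.value for e in E]
    if raw not in allowed:
        raise ValueError(f"Query parameter {name!r} must be one of {allowed}")
    return E(raw)


# e.g. GET /_synapse/admin/v1/event_reports?dir=b
assert parse_enum_from_args({"dir": "b"}, "dir", Direction, Direction.FORWARDS) is Direction.BACKWARDS
assert parse_enum_from_args({}, "dir", Direction, Direction.FORWARDS) is Direction.FORWARDS
```

Handlers and storage methods can then accept a `Direction` instead of a raw `"f"`/`"b"` string, which removes the scattered `if direction not in ("f", "b")` checks.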
Returns: @@ -1738,7 +1743,7 @@ async def _timestamp_to_event_from_destination( return None async def _timestamp_to_event_from_destination( - self, destination: str, room_id: str, timestamp: int, direction: str + self, destination: str, room_id: str, timestamp: int, direction: Direction ) -> "TimestampToEventResponse": """ Calls a remote federating server at `destination` asking for their @@ -1751,7 +1756,7 @@ async def _timestamp_to_event_from_destination( room_id: Room to fetch the event from timestamp: The point in time (inclusive) we should navigate from in the given direction to find the closest event. - direction: ["f"|"b"] to indicate whether we should navigate forward + direction: indicates whether we should navigate forward or backward from the given timestamp to find the closest event. Returns: diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index c9a6dfd1a4bf..8d36172484d6 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -34,7 +34,13 @@ from twisted.internet.abstract import isIPAddress from twisted.python import failure -from synapse.api.constants import EduTypes, EventContentFields, EventTypes, Membership +from synapse.api.constants import ( + Direction, + EduTypes, + EventContentFields, + EventTypes, + Membership, +) from synapse.api.errors import ( AuthError, Codes, @@ -218,7 +224,7 @@ async def on_backfill_request( return 200, res async def on_timestamp_to_event_request( - self, origin: str, room_id: str, timestamp: int, direction: str + self, origin: str, room_id: str, timestamp: int, direction: Direction ) -> Tuple[int, Dict[str, Any]]: """When we receive a federated `/timestamp_to_event` request, handle all of the logic for validating and fetching the event. @@ -228,7 +234,7 @@ async def on_timestamp_to_event_request( room_id: Room to fetch the event from timestamp: The point in time (inclusive) we should navigate from in the given direction to find the closest event. - direction: ["f"|"b"] to indicate whether we should navigate forward + direction: indicates whether we should navigate forward or backward from the given timestamp to find the closest event. Returns: diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 682666ab3602..c05d598b70cf 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -32,7 +32,7 @@ import attr import ijson -from synapse.api.constants import Membership +from synapse.api.constants import Direction, Membership from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.api.room_versions import RoomVersion from synapse.api.urls import ( @@ -169,7 +169,7 @@ async def backfill( ) async def timestamp_to_event( - self, destination: str, room_id: str, timestamp: int, direction: str + self, destination: str, room_id: str, timestamp: int, direction: Direction ) -> Union[JsonDict, List]: """ Calls a remote federating server at `destination` asking for their @@ -180,7 +180,7 @@ async def timestamp_to_event( room_id: Room to fetch the event from timestamp: The point in time (inclusive) we should navigate from in the given direction to find the closest event. - direction: ["f"|"b"] to indicate whether we should navigate forward + direction: indicates whether we should navigate forward or backward from the given timestamp to find the closest event. 
Returns: @@ -194,7 +194,7 @@ async def timestamp_to_event( room_id, ) - args = {"ts": [str(timestamp)], "dir": [direction]} + args = {"ts": [str(timestamp)], "dir": [direction.value]} remote_response = await self.client.get_json( destination, path=path, args=args, try_trailing_slash_on_400=True diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index 17c427387e03..f7ca87adc491 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -26,7 +26,7 @@ from typing_extensions import Literal -from synapse.api.constants import EduTypes +from synapse.api.constants import Direction, EduTypes from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersions from synapse.api.urls import FEDERATION_UNSTABLE_PREFIX, FEDERATION_V2_PREFIX @@ -234,9 +234,10 @@ async def on_GET( room_id: str, ) -> Tuple[int, JsonDict]: timestamp = parse_integer_from_args(query, "ts", required=True) - direction = parse_string_from_args( - query, "dir", default="f", allowed_values=["f", "b"], required=True + direction_str = parse_string_from_args( + query, "dir", allowed_values=["f", "b"], required=True ) + direction = Direction(direction_str) return await self.handler.on_timestamp_to_event_request( origin, room_id, timestamp, direction diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index d500b21809fa..67e789eef70e 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -314,7 +314,7 @@ class AccountDataEventSource(EventSource[int, JsonDict]): def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main - def get_current_key(self, direction: str = "f") -> int: + def get_current_key(self) -> int: return self.store.get_max_account_data_stream_id() async def get_new_events( diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 6a4fed115671..04c61ae3ddbd 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -315,5 +315,5 @@ async def get_new_events_as( return events, to_key - def get_current_key(self, direction: str = "f") -> int: + def get_current_key(self) -> int: return self.store.get_max_receipt_stream_id() diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 60a6d9cf3c18..7ba7c4ff0714 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -27,6 +27,7 @@ import synapse.events.snapshot from synapse.api.constants import ( + Direction, EventContentFields, EventTypes, GuestAccess, @@ -1487,7 +1488,7 @@ async def get_event_for_timestamp( requester: Requester, room_id: str, timestamp: int, - direction: str, + direction: Direction, ) -> Tuple[str, int]: """Find the closest event to the given timestamp in the given direction. If we can't find an event locally or the event we have locally is next to a gap, @@ -1498,7 +1499,7 @@ async def get_event_for_timestamp( room_id: Room to fetch the event from timestamp: The point in time (inclusive) we should navigate from in the given direction to find the closest event. - direction: ["f"|"b"] to indicate whether we should navigate forward + direction: indicates whether we should navigate forward or backward from the given timestamp to find the closest event. 
Returns: @@ -1533,13 +1534,13 @@ async def get_event_for_timestamp( local_event_id, allow_none=False, allow_rejected=False ) - if direction == "f": + if direction == Direction.FORWARDS: # We only need to check for a backward gap if we're looking forwards # to ensure there is nothing in between. is_event_next_to_backward_gap = ( await self.store.is_event_next_to_backward_gap(local_event) ) - elif direction == "b": + elif direction == Direction.BACKWARDS: # We only need to check for a forward gap if we're looking backwards # to ensure there is nothing in between is_event_next_to_forward_gap = ( diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index dead02cd5c4f..0070bd2940ea 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -13,6 +13,7 @@ # limitations under the License. """ This module contains base REST classes for constructing REST servlets. """ +import enum import logging from http import HTTPStatus from typing import ( @@ -362,6 +363,7 @@ def parse_string( request: Request, name: str, *, + default: Optional[str] = None, required: bool = False, allowed_values: Optional[Iterable[str]] = None, encoding: str = "ascii", @@ -413,6 +415,74 @@ def parse_string( ) +EnumT = TypeVar("EnumT", bound=enum.Enum) + + +@overload +def parse_enum( + request: Request, + name: str, + E: Type[EnumT], + default: EnumT, +) -> EnumT: + ... + + +@overload +def parse_enum( + request: Request, + name: str, + E: Type[EnumT], + *, + required: Literal[True], +) -> EnumT: + ... + + +def parse_enum( + request: Request, + name: str, + E: Type[EnumT], + default: Optional[EnumT] = None, + required: bool = False, +) -> Optional[EnumT]: + """ + Parse an enum parameter from the request query string. + + Note that the enum *must only have string values*. + + Args: + request: the twisted HTTP request. + name: the name of the query parameter. + E: the enum which represents valid values + default: enum value to use if the parameter is absent, defaults to None. + required: whether to raise a 400 SynapseError if the + parameter is absent, defaults to False. + + Returns: + An enum value. + + Raises: + SynapseError if the parameter is absent and required, or if the + parameter is present, must be one of a list of allowed values and + is not one of those allowed values. + """ + # Assert the enum values are strings. 
+ assert all( + isinstance(e.value, str) for e in E + ), "parse_enum only works with string values" + str_value = parse_string( + request, + name, + default=default.value if default is not None else None, + required=required, + allowed_values=[e.value for e in E], + ) + if str_value is None: + return None + return E(str_value) + + def _parse_string_value( value: bytes, allowed_values: Optional[Iterable[str]], diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index 6d634eef7081..a3beb74e2c3d 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -16,8 +16,9 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Tuple +from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError -from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.http.servlet import RestServlet, parse_enum, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.types import JsonDict @@ -60,7 +61,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: start = parse_integer(request, "from", default=0) limit = parse_integer(request, "limit", default=100) - direction = parse_string(request, "dir", default="b") + direction = parse_enum(request, "dir", Direction, Direction.BACKWARDS) user_id = parse_string(request, "user_id") room_id = parse_string(request, "room_id") @@ -78,13 +79,6 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: errcode=Codes.INVALID_PARAM, ) - if direction not in ("f", "b"): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Unknown direction: %s" % (direction,), - errcode=Codes.INVALID_PARAM, - ) - event_reports, total = await self.store.get_event_reports_paginate( start, limit, direction, user_id, room_id ) diff --git a/synapse/rest/admin/federation.py b/synapse/rest/admin/federation.py index 023ed92144b4..e0ee55bd0eb6 100644 --- a/synapse/rest/admin/federation.py +++ b/synapse/rest/admin/federation.py @@ -15,9 +15,10 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Tuple +from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.federation.transport.server import Authenticator -from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.http.servlet import RestServlet, parse_enum, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.storage.databases.main.transactions import DestinationSortOrder @@ -79,7 +80,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: allowed_values=[dest.value for dest in DestinationSortOrder], ) - direction = parse_string(request, "dir", default="f", allowed_values=("f", "b")) + direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) destinations, total = await self._store.get_destinations_paginate( start, limit, destination, order_by, direction @@ -192,7 +193,7 @@ async def on_GET( errcode=Codes.INVALID_PARAM, ) - direction = parse_string(request, "dir", default="f", allowed_values=("f", "b")) + direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) rooms, total = await self._store.get_destination_rooms_paginate( destination, start, limit, direction diff --git 
a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 73470f09ae44..0d072c42a7aa 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -17,9 +17,16 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Tuple +from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer -from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string +from synapse.http.servlet import ( + RestServlet, + parse_boolean, + parse_enum, + parse_integer, + parse_string, +) from synapse.http.site import SynapseRequest from synapse.rest.admin._base import ( admin_patterns, @@ -389,7 +396,7 @@ async def on_GET( # to newest media is on top for backward compatibility. if b"order_by" not in request.args and b"dir" not in request.args: order_by = MediaSortOrder.CREATED_TS.value - direction = "b" + direction = Direction.BACKWARDS else: order_by = parse_string( request, @@ -397,8 +404,8 @@ async def on_GET( default=MediaSortOrder.CREATED_TS.value, allowed_values=[sort_order.value for sort_order in MediaSortOrder], ) - direction = parse_string( - request, "dir", default="f", allowed_values=("f", "b") + direction = parse_enum( + request, "dir", Direction, default=Direction.FORWARDS ) media, total = await self.store.get_local_media_by_user_paginate( @@ -447,7 +454,7 @@ async def on_DELETE( # to newest media is on top for backward compatibility. if b"order_by" not in request.args and b"dir" not in request.args: order_by = MediaSortOrder.CREATED_TS.value - direction = "b" + direction = Direction.BACKWARDS else: order_by = parse_string( request, @@ -455,8 +462,8 @@ async def on_DELETE( default=MediaSortOrder.CREATED_TS.value, allowed_values=[sort_order.value for sort_order in MediaSortOrder], ) - direction = parse_string( - request, "dir", default="f", allowed_values=("f", "b") + direction = parse_enum( + request, "dir", Direction, default=Direction.FORWARDS ) media, _ = await self.store.get_local_media_by_user_paginate( diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index e957aa28ca4d..1d6e4982d7be 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -16,13 +16,14 @@ from typing import TYPE_CHECKING, List, Optional, Tuple, cast from urllib import parse as urlparse -from synapse.api.constants import EventTypes, JoinRules, Membership +from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter from synapse.http.servlet import ( ResolveRoomIdMixin, RestServlet, assert_params_in_dict, + parse_enum, parse_integer, parse_json_object_from_request, parse_string, @@ -224,15 +225,8 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: errcode=Codes.INVALID_PARAM, ) - direction = parse_string(request, "dir", default="f") - if direction not in ("f", "b"): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Unknown direction: %s" % (direction,), - errcode=Codes.INVALID_PARAM, - ) - - reverse_order = True if direction == "b" else False + direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) + reverse_order = True if direction == Direction.BACKWARDS else False # Return list of rooms according to parameters rooms, total_rooms = await self.store.get_rooms_paginate( @@ -949,7 +943,7 @@ async def on_GET( await assert_user_is_admin(self._auth, requester) 
timestamp = parse_integer(request, "ts", required=True) - direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) + direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) ( event_id, diff --git a/synapse/rest/admin/statistics.py b/synapse/rest/admin/statistics.py index 3b142b840206..9c45f4650dc3 100644 --- a/synapse/rest/admin/statistics.py +++ b/synapse/rest/admin/statistics.py @@ -16,8 +16,9 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Tuple +from synapse.api.constants import Direction from synapse.api.errors import Codes, SynapseError -from synapse.http.servlet import RestServlet, parse_integer, parse_string +from synapse.http.servlet import RestServlet, parse_enum, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.rest.admin._base import admin_patterns, assert_requester_is_admin from synapse.storage.databases.main.stats import UserSortOrder @@ -102,13 +103,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: errcode=Codes.INVALID_PARAM, ) - direction = parse_string(request, "dir", default="f") - if direction not in ("f", "b"): - raise SynapseError( - HTTPStatus.BAD_REQUEST, - "Unknown direction: %s" % (direction,), - errcode=Codes.INVALID_PARAM, - ) + direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) users_media, total = await self.store.get_users_media_usage_paginate( start, limit, from_ts, until_ts, order_by, direction, search_term diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 0841b89c1a66..b9dca8ef3a34 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -18,12 +18,13 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Dict, List, Optional, Tuple -from synapse.api.constants import UserTypes +from synapse.api.constants import Direction, UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.servlet import ( RestServlet, assert_params_in_dict, parse_boolean, + parse_enum, parse_integer, parse_json_object_from_request, parse_string, @@ -120,7 +121,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ), ) - direction = parse_string(request, "dir", default="f", allowed_values=("f", "b")) + direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) users, total = await self.store.get_users_paginate( start, diff --git a/synapse/rest/client/relations.py b/synapse/rest/client/relations.py index 9dd59196d9a2..7456d6f50724 100644 --- a/synapse/rest/client/relations.py +++ b/synapse/rest/client/relations.py @@ -16,6 +16,7 @@ import re from typing import TYPE_CHECKING, Optional, Tuple +from synapse.api.constants import Direction from synapse.handlers.relations import ThreadsListInclude from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_integer, parse_string @@ -59,7 +60,7 @@ async def on_GET( requester = await self.auth.get_user_by_req(request, allow_guest=True) pagination_config = await PaginationConfig.from_request( - self._store, request, default_limit=5, default_dir="b" + self._store, request, default_limit=5, default_dir=Direction.BACKWARDS ) # The unstable version of this API returns an extra field for client diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 790614d721dc..d0db85cca77f 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -26,7 +26,7 @@ from twisted.web.server import Request 
from synapse import event_auth -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import Direction, EventTypes, Membership from synapse.api.errors import ( AuthError, Codes, @@ -44,6 +44,7 @@ RestServlet, assert_params_in_dict, parse_boolean, + parse_enum, parse_integer, parse_json_object_from_request, parse_string, @@ -1297,7 +1298,7 @@ async def on_GET( await self._auth.check_user_in_room_or_world_readable(room_id, requester) timestamp = parse_integer(request, "ts", required=True) - direction = parse_string(request, "dir", default="f", allowed_values=["f", "b"]) + direction = parse_enum(request, "dir", Direction, default=Direction.FORWARDS) ( event_id, diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 0e47592be31e..837dc7646e64 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -17,6 +17,7 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple, cast +from synapse.api.constants import Direction from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import ( DatabasePool, @@ -167,7 +168,7 @@ async def get_users_paginate( guests: bool = True, deactivated: bool = False, order_by: str = UserSortOrder.NAME.value, - direction: str = "f", + direction: Direction = Direction.FORWARDS, approved: bool = True, ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users from @@ -197,7 +198,7 @@ def get_users_paginate_txn( # Set ordering order_by_column = UserSortOrder(order_by).value - if direction == "b": + if direction == Direction.BACKWARDS: order = "DESC" else: order = "ASC" diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index f42af34a2f28..d7d08369cacd 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -38,7 +38,7 @@ from twisted.internet import defer -from synapse.api.constants import EventTypes +from synapse.api.constants import Direction, EventTypes from synapse.api.errors import NotFoundError, SynapseError from synapse.api.room_versions import ( KNOWN_ROOM_VERSIONS, @@ -2240,7 +2240,7 @@ def is_event_next_to_gap_txn(txn: LoggingTransaction) -> bool: ) async def get_event_id_for_timestamp( - self, room_id: str, timestamp: int, direction: str + self, room_id: str, timestamp: int, direction: Direction ) -> Optional[str]: """Find the closest event to the given timestamp in the given direction. @@ -2248,14 +2248,14 @@ async def get_event_id_for_timestamp( room_id: Room to fetch the event from timestamp: The point in time (inclusive) we should navigate from in the given direction to find the closest event. - direction: ["f"|"b"] to indicate whether we should navigate forward + direction: indicates whether we should navigate forward or backward from the given timestamp to find the closest event. Returns: The closest event_id otherwise None if we can't find any event in the given direction. """ - if direction == "b": + if direction == Direction.BACKWARDS: # Find closest event *before* a given timestamp. We use descending # (which gives values largest to smallest) because we want the # largest possible timestamp *before* the given timestamp. 
@@ -2307,9 +2307,6 @@ def get_event_id_for_timestamp_txn(txn: LoggingTransaction) -> Optional[str]: return None - if direction not in ("f", "b"): - raise ValueError("Unknown direction: %s" % (direction,)) - return await self.db_pool.runInteraction( "get_event_id_for_timestamp_txn", get_event_id_for_timestamp_txn, diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 9b172a64d86d..b202c5eb87a6 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -26,6 +26,7 @@ cast, ) +from synapse.api.constants import Direction from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -176,7 +177,7 @@ async def get_local_media_by_user_paginate( limit: int, user_id: str, order_by: str = MediaSortOrder.CREATED_TS.value, - direction: str = "f", + direction: Direction = Direction.FORWARDS, ) -> Tuple[List[Dict[str, Any]], int]: """Get a paginated list of metadata for a local piece of media which an user_id has uploaded @@ -199,7 +200,7 @@ def get_local_media_by_user_paginate_txn( # Set ordering order_by_column = MediaSortOrder(order_by).value - if direction == "b": + if direction == Direction.BACKWARDS: order = "DESC" else: order = "ASC" diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index fbbc01888701..4ddb27f686df 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -35,6 +35,7 @@ import attr from synapse.api.constants import ( + Direction, EventContentFields, EventTypes, JoinRules, @@ -2204,7 +2205,7 @@ async def get_event_reports_paginate( self, start: int, limit: int, - direction: str = "b", + direction: Direction = Direction.BACKWARDS, user_id: Optional[str] = None, room_id: Optional[str] = None, ) -> Tuple[List[Dict[str, Any]], int]: @@ -2213,8 +2214,8 @@ async def get_event_reports_paginate( Args: start: event offset to begin the query from limit: number of rows to retrieve - direction: Whether to fetch the most recent first (`"b"`) or the - oldest first (`"f"`) + direction: Whether to fetch the most recent first (backwards) or the + oldest first (forwards) user_id: search for user_id. Ignored if user_id is None room_id: search for room_id. 
Ignored if room_id is None Returns: @@ -2236,7 +2237,7 @@ def _get_event_reports_paginate_txn( filters.append("er.room_id LIKE ?") args.extend(["%" + room_id + "%"]) - if direction == "b": + if direction == Direction.BACKWARDS: order = "DESC" else: order = "ASC" diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index 0c1cbd540d6e..d7b7d0c3c909 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -22,7 +22,7 @@ from twisted.internet.defer import DeferredLock -from synapse.api.constants import EventContentFields, EventTypes, Membership +from synapse.api.constants import Direction, EventContentFields, EventTypes, Membership from synapse.api.errors import StoreError from synapse.storage.database import ( DatabasePool, @@ -663,7 +663,7 @@ async def get_users_media_usage_paginate( from_ts: Optional[int] = None, until_ts: Optional[int] = None, order_by: Optional[str] = UserSortOrder.USER_ID.value, - direction: Optional[str] = "f", + direction: Direction = Direction.FORWARDS, search_term: Optional[str] = None, ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of users and their uploaded local media @@ -714,7 +714,7 @@ def get_users_media_usage_paginate_txn( 500, "Incorrect value for order_by provided: %s" % order_by ) - if direction == "b": + if direction == Direction.BACKWARDS: order = "DESC" else: order = "ASC" diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index f8c6877ee847..6b33d809b6cf 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -19,6 +19,7 @@ import attr from canonicaljson import encode_canonical_json +from synapse.api.constants import Direction from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import db_to_json from synapse.storage.database import ( @@ -496,7 +497,7 @@ async def get_destinations_paginate( limit: int, destination: Optional[str] = None, order_by: str = DestinationSortOrder.DESTINATION.value, - direction: str = "f", + direction: Direction = Direction.FORWARDS, ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of destinations. This will return a json list of destinations and the @@ -518,7 +519,7 @@ def get_destinations_paginate_txn( ) -> Tuple[List[JsonDict], int]: order_by_column = DestinationSortOrder(order_by).value - if direction == "b": + if direction == Direction.BACKWARDS: order = "DESC" else: order = "ASC" @@ -550,7 +551,11 @@ def get_destinations_paginate_txn( ) async def get_destination_rooms_paginate( - self, destination: str, start: int, limit: int, direction: str = "f" + self, + destination: str, + start: int, + limit: int, + direction: Direction = Direction.FORWARDS, ) -> Tuple[List[JsonDict], int]: """Function to retrieve a paginated list of destination's rooms. 
This will return a json list of rooms and the @@ -569,7 +574,7 @@ def get_destination_rooms_paginate_txn( txn: LoggingTransaction, ) -> Tuple[List[JsonDict], int]: - if direction == "b": + if direction == Direction.BACKWARDS: order = "DESC" else: order = "ASC" diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 5cb787518164..a04428041015 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -18,7 +18,7 @@ from synapse.api.constants import Direction from synapse.api.errors import SynapseError -from synapse.http.servlet import parse_integer, parse_string +from synapse.http.servlet import parse_enum, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.storage.databases.main import DataStore from synapse.types import StreamToken @@ -44,15 +44,9 @@ async def from_request( store: "DataStore", request: SynapseRequest, default_limit: int, - default_dir: str = "f", + default_dir: Direction = Direction.FORWARDS, ) -> "PaginationConfig": - direction_str = parse_string( - request, - "dir", - default=default_dir, - allowed_values=[Direction.FORWARDS.value, Direction.BACKWARDS.value], - ) - direction = Direction(direction_str) + direction = parse_enum(request, "dir", Direction, default=default_dir) from_tok_str = parse_string(request, "from") to_tok_str = parse_string(request, "to") diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 8a4e5c3f777b..233eba351690 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -280,7 +280,10 @@ def test_invalid_search_order(self) -> None: self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - self.assertEqual("Unknown direction: bar", channel.json_body["error"]) + self.assertEqual( + "Query parameter 'dir' must be one of ['b', 'f']", + channel.json_body["error"], + ) def test_limit_is_negative(self) -> None: """ From 1d3a54aa3085d7d29abfc5590e9075786a5754bc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 1 Feb 2023 23:25:15 +0000 Subject: [PATCH 105/344] Bump hiredis from 2.0.0 to 2.1.1 (#14939) * Bump hiredis from 2.0.0 to 2.1.1 Bumps [hiredis](https://github.com/redis/hiredis-py) from 2.0.0 to 2.1.1. - [Release notes](https://github.com/redis/hiredis-py/releases) - [Changelog](https://github.com/redis/hiredis-py/blob/master/CHANGELOG.md) - [Commits](https://github.com/redis/hiredis-py/compare/v2.0.0...v2.1.1) --- updated-dependencies: - dependency-name: hiredis dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14939.misc | 1 + poetry.lock | 134 ++++++++++++++++++++++++++++------------- 2 files changed, 92 insertions(+), 43 deletions(-) create mode 100644 changelog.d/14939.misc diff --git a/changelog.d/14939.misc b/changelog.d/14939.misc new file mode 100644 index 000000000000..236826e44174 --- /dev/null +++ b/changelog.d/14939.misc @@ -0,0 +1 @@ +Bump hiredis from 2.0.0 to 2.1.1. 
diff --git a/poetry.lock b/poetry.lock index 99628643f0d7..2d58788709a5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -501,53 +501,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\"" [[package]] name = "hiredis" -version = "2.0.0" +version = "2.1.1" description = "Python wrapper for hiredis" category = "main" optional = true -python-versions = ">=3.6" +python-versions = ">=3.7" files = [ - {file = "hiredis-2.0.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b4c8b0bc5841e578d5fb32a16e0c305359b987b850a06964bd5a62739d688048"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0adea425b764a08270820531ec2218d0508f8ae15a448568109ffcae050fee26"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:3d55e36715ff06cdc0ab62f9591607c4324297b6b6ce5b58cb9928b3defe30ea"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:5d2a48c80cf5a338d58aae3c16872f4d452345e18350143b3bf7216d33ba7b99"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:240ce6dc19835971f38caf94b5738092cb1e641f8150a9ef9251b7825506cb05"}, - {file = "hiredis-2.0.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:5dc7a94bb11096bc4bffd41a3c4f2b958257085c01522aa81140c68b8bf1630a"}, - {file = "hiredis-2.0.0-cp36-cp36m-win32.whl", hash = "sha256:139705ce59d94eef2ceae9fd2ad58710b02aee91e7fa0ccb485665ca0ecbec63"}, - {file = "hiredis-2.0.0-cp36-cp36m-win_amd64.whl", hash = "sha256:c39c46d9e44447181cd502a35aad2bb178dbf1b1f86cf4db639d7b9614f837c6"}, - {file = "hiredis-2.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:adf4dd19d8875ac147bf926c727215a0faf21490b22c053db464e0bf0deb0485"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:0f41827028901814c709e744060843c77e78a3aca1e0d6875d2562372fcb405a"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:508999bec4422e646b05c95c598b64bdbef1edf0d2b715450a078ba21b385bcc"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:0d5109337e1db373a892fdcf78eb145ffb6bbd66bb51989ec36117b9f7f9b579"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:04026461eae67fdefa1949b7332e488224eac9e8f2b5c58c98b54d29af22093e"}, - {file = "hiredis-2.0.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:a00514362df15af041cc06e97aebabf2895e0a7c42c83c21894be12b84402d79"}, - {file = "hiredis-2.0.0-cp37-cp37m-win32.whl", hash = "sha256:09004096e953d7ebd508cded79f6b21e05dff5d7361771f59269425108e703bc"}, - {file = "hiredis-2.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:f8196f739092a78e4f6b1b2172679ed3343c39c61a3e9d722ce6fcf1dac2824a"}, - {file = "hiredis-2.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:294a6697dfa41a8cba4c365dd3715abc54d29a86a40ec6405d677ca853307cfb"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:3dddf681284fe16d047d3ad37415b2e9ccdc6c8986c8062dbe51ab9a358b50a5"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:dcef843f8de4e2ff5e35e96ec2a4abbdf403bd0f732ead127bd27e51f38ac298"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:87c7c10d186f1743a8fd6a971ab6525d60abd5d5d200f31e073cd5e94d7e7a9d"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:7f0055f1809b911ab347a25d786deff5e10e9cf083c3c3fd2dd04e8612e8d9db"}, - {file = "hiredis-2.0.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:11d119507bb54e81f375e638225a2c057dda748f2b1deef05c2b1a5d42686048"}, - 
{file = "hiredis-2.0.0-cp38-cp38-win32.whl", hash = "sha256:7492af15f71f75ee93d2a618ca53fea8be85e7b625e323315169977fae752426"}, - {file = "hiredis-2.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:65d653df249a2f95673976e4e9dd7ce10de61cfc6e64fa7eeaa6891a9559c581"}, - {file = "hiredis-2.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ae8427a5e9062ba66fc2c62fb19a72276cf12c780e8db2b0956ea909c48acff5"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:3f5f7e3a4ab824e3de1e1700f05ad76ee465f5f11f5db61c4b297ec29e692b2e"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:e3447d9e074abf0e3cd85aef8131e01ab93f9f0e86654db7ac8a3f73c63706ce"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:8b42c0dc927b8d7c0eb59f97e6e34408e53bc489f9f90e66e568f329bff3e443"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:b84f29971f0ad4adaee391c6364e6f780d5aae7e9226d41964b26b49376071d0"}, - {file = "hiredis-2.0.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:0b39ec237459922c6544d071cdcf92cbb5bc6685a30e7c6d985d8a3e3a75326e"}, - {file = "hiredis-2.0.0-cp39-cp39-win32.whl", hash = "sha256:a7928283143a401e72a4fad43ecc85b35c27ae699cf5d54d39e1e72d97460e1d"}, - {file = "hiredis-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:a4ee8000454ad4486fb9f28b0cab7fa1cd796fc36d639882d0b34109b5b3aec9"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1f03d4dadd595f7a69a75709bc81902673fa31964c75f93af74feac2f134cc54"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux1_x86_64.whl", hash = "sha256:04927a4c651a0e9ec11c68e4427d917e44ff101f761cd3b5bc76f86aaa431d27"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl", hash = "sha256:a39efc3ade8c1fb27c097fd112baf09d7fd70b8cb10ef1de4da6efbe066d381d"}, - {file = "hiredis-2.0.0-pp36-pypy36_pp73-win32.whl", hash = "sha256:07bbf9bdcb82239f319b1f09e8ef4bdfaec50ed7d7ea51a56438f39193271163"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:807b3096205c7cec861c8803a6738e33ed86c9aae76cac0e19454245a6bbbc0a"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux1_x86_64.whl", hash = "sha256:1233e303645f468e399ec906b6b48ab7cd8391aae2d08daadbb5cad6ace4bd87"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-manylinux2010_x86_64.whl", hash = "sha256:cb2126603091902767d96bcb74093bd8b14982f41809f85c9b96e519c7e1dc41"}, - {file = "hiredis-2.0.0-pp37-pypy37_pp73-win32.whl", hash = "sha256:f52010e0a44e3d8530437e7da38d11fb822acfb0d5b12e9cd5ba655509937ca0"}, - {file = "hiredis-2.0.0.tar.gz", hash = "sha256:81d6d8e39695f2c37954d1011c0480ef7cf444d4e3ae24bc5e89ee5de360139a"}, + {file = "hiredis-2.1.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:f15e48545dadf3760220821d2f3c850e0c67bbc66aad2776c9d716e6216b5103"}, + {file = "hiredis-2.1.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b3a437e3af246dd06d116f1615cdf4e620e639dfcc923fe3045e00f6a967fc27"}, + {file = "hiredis-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b61732d75e2222a3b0060b97395df78693d5c3487fe4a5d0b75f6ac1affc68b9"}, + {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:170c2080966721b42c5a8726e91c5fc271300a4ac9ddf8a5b79856cfd47553e1"}, + {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d2d6e4caaffaf42faf14cfdf20b1d6fff6b557137b44e9569ea6f1877e6f375d"}, + {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:d64b2d90302f0dd9e9ba43e89f8640f35b6d5968668da82ba2d2652b2cc3c3d2"}, + {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61fd1c55efb48ba734628f096e7a50baf0df3f18e91183face5c07fba3b4beb7"}, + {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfc5e923828714f314737e7f856b3dccf8805e5679fe23f07241b397cd785f6c"}, + {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ef2aa0485735c8608a92964e52ab9025ceb6003776184a1eb5d1701742cc910b"}, + {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2d39193900a03b900a25d474b9f787434f05a282b402f063d4ca02c62d61bdb9"}, + {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:4b51f5eb47e61c6b82cb044a1815903a77a4f840fa050fd2ff40d617c102d16c"}, + {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d9145d011b74bef972b485a09f391babaa101626dbb54afc2313d5682a746593"}, + {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6f45509b43d720d64837c1211fcdea42acd48e71539b7152d74c16413ceea080"}, + {file = "hiredis-2.1.1-cp310-cp310-win32.whl", hash = "sha256:3a284bbf6503cd6ac1183b3542fe853a8be47fb52a631224f6dda46ba229d572"}, + {file = "hiredis-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:f60fad285db733b2badba43f7036a1241cb3e19c17260348f3ff702e6eaa4980"}, + {file = "hiredis-2.1.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:69c20816ac2af11701caf10e5b027fd33c6e8dfe7806ab71bc5191aa2a6d50f9"}, + {file = "hiredis-2.1.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:cd43dbaa73322a0c125122114cbc2c37141353b971751d05798f3b9780091e90"}, + {file = "hiredis-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c9632cd480fbc09c14622038a9a5f2f21ef6ce35892e9fa4df8d3308d3f2cedf"}, + {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:252d4a254f1566012b94e35cba577a001d3a732fa91e824d2076233222232cf9"}, + {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b901e68f3a6da279388e5dbe8d3bc562dd6dd3ff8a4b90e4f62e94de36461777"}, + {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f45f296998043345ecfc4f69a51fa4f3e80ca3659864df80b459095580968a6"}, + {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79f2acf237428dd61faa5b49247999ff68f45b3552c57303fcfabd2002eab249"}, + {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82bc6f5b92c9fcd5b5d6506000dd433006b126b193932c52a9bcc10dcc10e4fc"}, + {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19843e4505069085301c3126c91b4e48970070fb242d7c617fb6777e83b55541"}, + {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c7336fddae533cbe786360d7a0316c71fe96313872c06cde20a969765202ab04"}, + {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:90b4355779970e121c219def3e35533ec2b24773a26fc4aa0f8271dd262fa2f2"}, + {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4beaac5047317a73b27cf15b4f4e0d2abaafa8378e1a6ed4cf9ff420d8f88aba"}, + {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7e25dc06e02689a45a49fa5e2f48bdfdbc11c5b52bef792a8cb37e0b82a7b0ae"}, + {file = "hiredis-2.1.1-cp311-cp311-win32.whl", hash = 
"sha256:f8b3233c1de155743ef34b0cae494e33befed5e0adba77762f5d8a8e417c5015"}, + {file = "hiredis-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:4ced076af04e28761d486501c58259247c1882fd19c7f94c18a257d143248eee"}, + {file = "hiredis-2.1.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:f4300e063045e11ee79b79a7c9426813ab8d97e340b15843374093225dde407d"}, + {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b04b6c04fe13e1e30ba6f9340d3d0fb776a7e52611d11809fb59341871e050e5"}, + {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:436dcbbe3104737e8b4e2d63a019a764d107d72d6b6ee3cd107097c1c263fd1e"}, + {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11801d9e96f39286ab558c6db940c39fc00150450ae1007d18b35437d2f79ad7"}, + {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7d8d0ca7b4f6136f8a29845d31cfbc3f562cbe71f26da6fca55aa4977e45a18"}, + {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c040af9eb9b12602b4b714b90a1c2ac1109e939498d47b0748ec33e7a948747"}, + {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f448146b86a8693dda5f02bb4cb2ef65c894db2cf743e7bf351978354ce685e3"}, + {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:649c5a1f0952af50f008f0bbec5f0b1e519150220c0a71ef80541a0c128d0c13"}, + {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b8e7415b0952b0dd6df3aa2d37b5191c85e54d6a0ac1449ddb1e9039bbb39fa5"}, + {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:38c1a56a30b953e3543662f950f498cfb17afed214b27f4fc497728fb623e0c9"}, + {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6050b519fb3b62d68a28a1941ae9dc5122e8820fef2b8e20a65cb3c1577332a0"}, + {file = "hiredis-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:96add2a205efffe5e19a256a50be0ed78fcb5e9503242c65f57928e95cf4c901"}, + {file = "hiredis-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:8ceb101095f8cce9ac672ed7244b002d83ea97af7f27bb73f2fbe7fe8e8f03c7"}, + {file = "hiredis-2.1.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:9f068136e5119f2ba939ecd45c47b4e3cf6dd7ca9a65b6078c838029c5c1f564"}, + {file = "hiredis-2.1.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8a42e246a03086ae1430f789e37d7192113db347417932745c4700d8999f853a"}, + {file = "hiredis-2.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5359811bfdb10fca234cba4629e555a1cde6c8136025395421f486ce43129ae3"}, + {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d304746e2163d3d2cbc4c08925539e00d2bb3edc9e79fce531b5468d4e264d15"}, + {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4fe297a52a8fc1204eef646bebf616263509d089d472e25742913924b1449099"}, + {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:637e563d5cbf79d8b04224f99cfce8001146647e7ce198f0b032e32e62079e3c"}, + {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b61340ff2dcd99d5ded0ca5fc33c878d89a1426e2f7b6dbc7c7381e330bc8a"}, + {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66eaf6d5ea5207177ba8ffb9ee479eea743292267caf1d6b89b51cf9d5885d23"}, + {file = 
"hiredis-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4d2d0e458c32cdafd9a0f0b0aaeb61b169583d074287721eee740b730b7654bd"}, + {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8a92781e466f2f1f9d38720d8920cb094bc0d59f88219591bc12b1c12c9d471c"}, + {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:5560b09304ebaac5323a7402f5090f2a8559843200014f5adf1ff7517dd3805b"}, + {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4732a0bf877bbd69d4d1b38a3db2160252acb31894a48f324fd54f742f6b2123"}, + {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b5bd33ac8a572e2aa94b489dec35b0c00ca554b27e56ad19953e0bf2cbcf3ad8"}, + {file = "hiredis-2.1.1-cp38-cp38-win32.whl", hash = "sha256:07e86649773e486a21e170d1396217e15833776d9e8f4a7121c28a1d37e032c9"}, + {file = "hiredis-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:b964d81db8f11a99552621acd24c97381a0fd401a57187ce9f8cb9a53f4b6f4e"}, + {file = "hiredis-2.1.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:27e89e7befc785a273cccb105840db54b7f93005adf4e68c516d57b19ea2aac2"}, + {file = "hiredis-2.1.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ea6f0f98e1721741b5bc3167a495a9f16459fe67648054be05365a67e67c29ba"}, + {file = "hiredis-2.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:40c34aeecccb9474999839299c9d2d5ff46a62ed47c58645b7965f48944abd74"}, + {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65927e75da4265ec88d06cbdab20113a9e69bbac3aea1ec053d4d940f1c88fc8"}, + {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72cab67bcceb2e998da2f28aad9ec7b1a5ece5888f7ac3d3723cccba62338703"}, + {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d67429ff99231137491d8c3daa097c767a9c273bb03ac412ed8f6acb89e2e52f"}, + {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c596bce5e9dd379c68c17208716da2767bb6f6f2a71d748f9e4c247ced31e6"}, + {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e0aab2d6e60aa9f9e14c83396b4a58fb4aded712806486c79189bcae4a175ac"}, + {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:17deb7d218a5ae9f05d2b19d51936231546973303747924fc17a2869aef0029a"}, + {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d3d60e2af4ce93d6e45a50a9b5795156a8725495e411c7987a2f81ab14e99665"}, + {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:fbc960cd91e55e2281e1a330e7d1c4970b6a05567dd973c96e412b4d012e17c6"}, + {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0ae718e9db4b622072ff73d38bc9cd7711edfedc8a1e08efe25a6c8170446da4"}, + {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e51e3fa176fecd19660f898c4238232e8ca0f5709e6451a664c996f9aec1b8e1"}, + {file = "hiredis-2.1.1-cp39-cp39-win32.whl", hash = "sha256:0258bb84b4a1e015f14f891d91957042fa88f6f4e86cc0808d735ebbc1e3fc88"}, + {file = "hiredis-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c5a47c964c58c044a323336a798d8729722e09865d7e087eb3512df6146b39a8"}, + {file = "hiredis-2.1.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8de0334c212e069d49952e476e16c6b42ba9677cc1e2d2f4588bd9a39489a3ab"}, + {file = "hiredis-2.1.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:653e33f69202c00eca35416ee23091447ad1e9f9a556cc2b715b2befcfc31b3c"}, + {file = "hiredis-2.1.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f14cccf931c859ba3169d766e892a3673a79649ec2ceca7ba95ea376b23fd222"}, + {file = "hiredis-2.1.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86c56359fd7aca6a9ca41af91636aef15d5ad6d19e631ebd662f233c79f7e100"}, + {file = "hiredis-2.1.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:c2b197e3613c3aef3933b2c6eb095bd4be9c84022aea52057697b709b400c4bc"}, + {file = "hiredis-2.1.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ec060d6db9576f6723b5290448aea67160608556b5506eb947997d9d1ca6f7b7"}, + {file = "hiredis-2.1.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8781f5b91d75abef529a33cf3509ba5fe540d2814de0c4602f0f5ba6f1669739"}, + {file = "hiredis-2.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bd6b934794bea92a15b10ac35889df63b28d2abf9d020a7c87c05dd9c6e1edd"}, + {file = "hiredis-2.1.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf6d85c1ffb4ec4a859b2f31cd8845e633f91ed971a3cce6f59a722dcc361b8c"}, + {file = "hiredis-2.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bbf80c686e3f63d40b0ab42d3605d3b6d415c368a5d8a9764a314ebda6138650"}, + {file = "hiredis-2.1.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c1d85dfdf37a8df0e0174fc0c762b485b80a2fc7ce9592ae109aaf4a5d45ba9a"}, + {file = "hiredis-2.1.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:816b9ea96e7cc2496a1ac9c4a76db670827c1e31045cc377c66e64a20bb4b3ff"}, + {file = "hiredis-2.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db59afa0edf194bea782e4686bfc496fc1cea2e24f310d769641e343d14cc929"}, + {file = "hiredis-2.1.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c7a7e4ccec7164cdf2a9bbedc0e7430492eb56d9355a41377f40058c481bccc"}, + {file = "hiredis-2.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:646f150fa73f9cbc69419e34a1aae318c9f39bd9640760aa46624b2815da0c2d"}, + {file = "hiredis-2.1.1.tar.gz", hash = "sha256:21751e4b7737aaf7261a068758b22f7670155099592b28d8dde340bf6874313d"}, ] [[package]] From 58214dbb9b8a85c0dafc65162e9c20ee1885ce4e Mon Sep 17 00:00:00 2001 From: realtyem Date: Wed, 1 Feb 2023 17:42:45 -0600 Subject: [PATCH 106/344] Allow enabling the asyncio reactor in complement (#14858) Signed-off-by: Jason Little realtyem@gmail.com --- .github/workflows/tests.yml | 5 ++++- changelog.d/14858.misc | 1 + .../complement/conf/start_for_complement.sh | 13 +++++++++++- docs/development/contributing_guide.md | 1 + scripts-dev/complement.sh | 5 +++++ synapse/app/complement_fork_starter.py | 21 +++++++++++++++++-- 6 files changed, 42 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14858.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f184727cedb5..6561b490bcf5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -541,8 +541,11 @@ jobs: - run: | set -o pipefail - POSTGRES=${{ (matrix.database == 'Postgres') && 1 || '' }} WORKERS=${{ (matrix.arrangement == 'workers') && 1 || '' }} COMPLEMENT_DIR=`pwd`/complement synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt + COMPLEMENT_DIR=`pwd`/complement 
synapse/scripts-dev/complement.sh -json 2>&1 | synapse/.ci/scripts/gotestfmt shell: bash + env: + POSTGRES: ${{ (matrix.database == 'Postgres') && 1 || '' }} + WORKERS: ${{ (matrix.arrangement == 'workers') && 1 || '' }} name: Run Complement Tests cargo-test: diff --git a/changelog.d/14858.misc b/changelog.d/14858.misc new file mode 100644 index 000000000000..c48f40cd3876 --- /dev/null +++ b/changelog.d/14858.misc @@ -0,0 +1 @@ +Run the integration test suites with the asyncio reactor enabled in CI. diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index 49d79745b064..af13209c54e9 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -6,7 +6,7 @@ set -e echo "Complement Synapse launcher" echo " Args: $@" -echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS" +echo " Env: SYNAPSE_COMPLEMENT_DATABASE=$SYNAPSE_COMPLEMENT_DATABASE SYNAPSE_COMPLEMENT_USE_WORKERS=$SYNAPSE_COMPLEMENT_USE_WORKERS SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" function log { d=$(date +"%Y-%m-%d %H:%M:%S,%3N") @@ -76,6 +76,17 @@ else fi +if [[ -n "$SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR" ]]; then + if [[ -n "$SYNAPSE_USE_EXPERIMENTAL_FORKING_LAUNCHER" ]]; then + export SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR="1" + else + export SYNAPSE_ASYNC_IO_REACTOR="1" + fi +else + export SYNAPSE_ASYNC_IO_REACTOR="0" +fi + + # Add Complement's appservice registration directory, if there is one # (It can be absent when there are no application services in this test!) if [ -d /complement/appservice ]; then diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 3cbfe9698762..36bc88468468 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -332,6 +332,7 @@ The above will run a monolithic (single-process) Synapse with SQLite as the data [here](https://github.com/matrix-org/synapse/blob/develop/docker/configure_workers_and_start.py#L54). A safe example would be `WORKER_TYPES="federation_inbound, federation_sender, synchrotron"`. See the [worker documentation](../workers.md) for additional information on workers. +- Passing `ASYNCIO_REACTOR=1` as an environment variable to use the Twisted asyncio reactor instead of the default one. To increase the log level for the tests, set `SYNAPSE_TEST_LOG_LEVEL`, e.g: ```sh diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index e72d96fd165c..66aaa3d8480d 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -228,6 +228,11 @@ else test_tags="$test_tags,msc2716" fi +if [[ -n "$ASYNCIO_REACTOR" ]]; then + # Enable the Twisted asyncio reactor + export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true +fi + if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then # Set the log level to what is desired diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py index 8c0f4a57e70a..920538f44df2 100644 --- a/synapse/app/complement_fork_starter.py +++ b/synapse/app/complement_fork_starter.py @@ -110,6 +110,8 @@ def _worker_entrypoint( and then kick off the worker's main() function. 
""" + from synapse.util.stringutils import strtobool + sys.argv = args # reset the custom signal handlers that we installed, so that the children start @@ -117,9 +119,24 @@ def _worker_entrypoint( for sig, handler in _original_signal_handlers.items(): signal.signal(sig, handler) - from twisted.internet.epollreactor import EPollReactor + # Install the asyncio reactor if the + # SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR is set to 1. The + # SYNAPSE_ASYNC_IO_REACTOR variable would be used, but then causes + # synapse/__init__.py to also try to install an asyncio reactor. + if strtobool( + os.environ.get("SYNAPSE_COMPLEMENT_FORKING_LAUNCHER_ASYNC_IO_REACTOR", "0") + ): + import asyncio + + from twisted.internet.asyncioreactor import AsyncioSelectorReactor + + reactor = AsyncioSelectorReactor(asyncio.get_event_loop()) + proxy_reactor._install_real_reactor(reactor) + else: + from twisted.internet.epollreactor import EPollReactor + + proxy_reactor._install_real_reactor(EPollReactor()) - proxy_reactor._install_real_reactor(EPollReactor()) func() From da8a9571131de8161eb2cccbb5d4ba23b1f5d85e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 1 Feb 2023 19:01:06 -0500 Subject: [PATCH 107/344] Make extension-module optional, but default. (#14965) --- changelog.d/14965.misc | 1 + rust/Cargo.toml | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14965.misc diff --git a/changelog.d/14965.misc b/changelog.d/14965.misc new file mode 100644 index 000000000000..3ae0537d9665 --- /dev/null +++ b/changelog.d/14965.misc @@ -0,0 +1 @@ +Allow running `cargo` without the `extension-module` option. diff --git a/rust/Cargo.toml b/rust/Cargo.toml index cffaa5b51b94..09e2bba5e5c2 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -23,13 +23,17 @@ name = "synapse.synapse_rust" anyhow = "1.0.63" lazy_static = "1.4.0" log = "0.4.17" -pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } +pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] } pyo3-log = "0.7.0" pythonize = "0.17.0" regex = "1.6.0" serde = { version = "1.0.144", features = ["derive"] } serde_json = "1.0.85" +[features] +extension-module = ["pyo3/extension-module"] +default = ["extension-module"] + [build-dependencies] blake2 = "0.10.4" hex = "0.4.3" From f398886ab840061952a4d96ffe2261369c90640a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 2 Feb 2023 07:21:46 -0500 Subject: [PATCH 108/344] Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955 (#14968) --- .github/workflows/latest_deps.yml | 6 +++--- .github/workflows/tests.yml | 18 +++++++++--------- .github/workflows/twisted_trunk.yml | 6 +++--- changelog.d/14968.misc | 1 + 4 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 changelog.d/14968.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 9ede9e90bf38..99fc2cee08c4 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -61,7 +61,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: 
dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -134,7 +134,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 6561b490bcf5..e945ffe7f3ca 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -112,7 +112,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: 1.58.1 components: clippy @@ -134,7 +134,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: nightly-2022-12-01 components: clippy @@ -154,7 +154,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: 1.58.1 components: rustfmt @@ -221,7 +221,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -266,7 +266,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -386,7 +386,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -531,7 +531,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. 
- uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -562,7 +562,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -585,7 +585,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: nightly-2022-12-01 - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 47fae0b79bee..a59c8dac09b8 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -43,7 +43,7 @@ jobs: - run: sudo apt-get -qq install xmlsec1 - name: Install Rust - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -82,7 +82,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f + uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/changelog.d/14968.misc b/changelog.d/14968.misc new file mode 100644 index 000000000000..a96c977f3b19 --- /dev/null +++ b/changelog.d/14968.misc @@ -0,0 +1 @@ +Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. From 2186ebed6c9dfe15cfa8a8a7c97c2a89a907f9a8 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 2 Feb 2023 16:49:14 +0000 Subject: [PATCH 109/344] Fetch fewer events when getting hosts in room (#14962) --- changelog.d/14962.feature | 1 + synapse/storage/databases/main/roommember.py | 46 +++++++++++++++++++- 2 files changed, 45 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14962.feature diff --git a/changelog.d/14962.feature b/changelog.d/14962.feature new file mode 100644 index 000000000000..38f26012f23c --- /dev/null +++ b/changelog.d/14962.feature @@ -0,0 +1 @@ +Improve performance when joining or sending an event large rooms. diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 8e2ba7b7b470..ea6a5e2f3495 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging +from itertools import chain from typing import ( TYPE_CHECKING, AbstractSet, @@ -1131,12 +1132,33 @@ async def _get_joined_hosts( else: # The cache doesn't match the state group or prev state group, # so we calculate the result from first principles. + # + # We need to fetch all hosts joined to the room according to `state` by + # inspecting all join memberships in `state`. However, if the `state` is + # relatively recent then many of its events are likely to be held in + # the current state of the room, which is easily available and likely + # cached. + # + # We therefore compute the set of `state` events not in the + # current state and only fetch those. + current_memberships = ( + await self._get_approximate_current_memberships_in_room(room_id) + ) + unknown_state_events = {} + joined_users_in_current_state = [] + + for (type, state_key), event_id in state.items(): + if event_id not in current_memberships: + unknown_state_events[type, state_key] = event_id + elif current_memberships[event_id] == Membership.JOIN: + joined_users_in_current_state.append(state_key) + joined_user_ids = await self.get_joined_user_ids_from_state( - room_id, state + room_id, unknown_state_events ) cache.hosts_to_joined_users = {} - for user_id in joined_user_ids: + for user_id in chain(joined_user_ids, joined_users_in_current_state): host = intern_string(get_domain_from_id(user_id)) cache.hosts_to_joined_users.setdefault(host, set()).add(user_id) @@ -1147,6 +1169,26 @@ async def _get_joined_hosts( return frozenset(cache.hosts_to_joined_users) + async def _get_approximate_current_memberships_in_room( + self, room_id: str + ) -> Mapping[str, Optional[str]]: + """Build a map from event id to membership, for all events in the current state. + + The event ids of non-memberships events (e.g. `m.room.power_levels`) are present + in the result, mapped to values of `None`. + + The result is approximate for partially-joined rooms. It is fully accurate + for fully-joined rooms. + """ + + rows = await self.db_pool.simple_select_list( + "current_state_events", + keyvalues={"room_id": room_id}, + retcols=("event_id", "membership"), + desc="has_completed_background_updates", + ) + return {row["event_id"]: row["membership"] for row in rows} + @cached(max_entries=10000) def _get_joined_hosts_cache(self, room_id: str) -> "_JoinedHostsCache": return _JoinedHostsCache() From f36da501be4287e723a0a53ac4568d836676a15d Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 2 Feb 2023 11:58:20 -0500 Subject: [PATCH 110/344] Do not calculate presence or ephemeral events when they are filtered out (#14970) This expands the previous optimisation from being only for initial sync to being for all sync requests. It also inverts some of the logic to be inclusive instead of exclusive. --- changelog.d/14970.misc | 1 + synapse/handlers/sync.py | 19 +++++++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) create mode 100644 changelog.d/14970.misc diff --git a/changelog.d/14970.misc b/changelog.d/14970.misc new file mode 100644 index 000000000000..365762360285 --- /dev/null +++ b/changelog.d/14970.misc @@ -0,0 +1 @@ +Improve performance of `/sync` in a few situations. 
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 5235e294608d..0cb8d5ef4b66 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1459,10 +1459,12 @@ async def generate_sync_result( sync_result_builder, account_data_by_room ) - block_all_presence_data = ( - since_token is None and sync_config.filter_collection.blocks_all_presence() + # Presence data is included if the server has it enabled and not filtered out. + include_presence_data = ( + self.hs_config.server.use_presence + and not sync_config.filter_collection.blocks_all_presence() ) - if self.hs_config.server.use_presence and not block_all_presence_data: + if include_presence_data: logger.debug("Fetching presence data") await self._generate_sync_entry_for_presence( sync_result_builder, @@ -1841,15 +1843,12 @@ async def _generate_sync_entry_for_rooms( """ since_token = sync_result_builder.since_token - - # 1. Start by fetching all ephemeral events in rooms we've joined (if required). user_id = sync_result_builder.sync_config.user.to_string() - block_all_room_ephemeral = ( - since_token is None - and sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() - ) - if block_all_room_ephemeral: + # 1. Start by fetching all ephemeral events in rooms we've joined (if required). + if ( + sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() + ): ephemeral_by_room: Dict[str, List[JsonDict]] = {} else: now_token, ephemeral_by_room = await self.ephemeral_by_room( From da05b70af5bf84825332b2ac0d63c6deda4b376f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 2 Feb 2023 13:45:12 -0500 Subject: [PATCH 111/344] Skip unused calculations in sync handler. (#14908) If a sync request does not need to calculate per-room entries & is not generating presence & is not generating device list data (e.g. during initial sync) avoid the expensive calculation of room specific data. This is a micro-optimisation for clients syncing simply to receive to-device information. --- changelog.d/14908.misc | 1 + synapse/api/filtering.py | 3 + synapse/handlers/sync.py | 258 ++++++++++++++++++++------------------- 3 files changed, 137 insertions(+), 125 deletions(-) create mode 100644 changelog.d/14908.misc diff --git a/changelog.d/14908.misc b/changelog.d/14908.misc new file mode 100644 index 000000000000..365762360285 --- /dev/null +++ b/changelog.d/14908.misc @@ -0,0 +1 @@ +Improve performance of `/sync` in a few situations. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 4cf8f0cc8ef9..2b5af264b43d 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -283,6 +283,9 @@ async def filter_room_account_data( await self._room_filter.filter(events) ) + def blocks_all_rooms(self) -> bool: + return self._room_filter.filters_all_rooms() + def blocks_all_presence(self) -> bool: return ( self._presence_filter.filters_all_types() diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 0cb8d5ef4b66..3566537894e6 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1448,41 +1448,67 @@ async def generate_sync_result( sync_result_builder ) - logger.debug("Fetching room data") - - ( - newly_joined_rooms, - newly_joined_or_invited_or_knocked_users, - newly_left_rooms, - newly_left_users, - ) = await self._generate_sync_entry_for_rooms( - sync_result_builder, account_data_by_room - ) - # Presence data is included if the server has it enabled and not filtered out. 
- include_presence_data = ( + include_presence_data = bool( self.hs_config.server.use_presence and not sync_config.filter_collection.blocks_all_presence() ) - if include_presence_data: - logger.debug("Fetching presence data") - await self._generate_sync_entry_for_presence( - sync_result_builder, + # Device list updates are sent if a since token is provided. + include_device_list_updates = bool(since_token and since_token.device_list_key) + + # If we do not care about the rooms or things which depend on the room + # data (namely presence and device list updates), then we can skip + # this process completely. + device_lists = DeviceListUpdates() + if ( + not sync_result_builder.sync_config.filter_collection.blocks_all_rooms() + or include_presence_data + or include_device_list_updates + ): + logger.debug("Fetching room data") + + # Note that _generate_sync_entry_for_rooms sets sync_result_builder.joined, which + # is used in calculate_user_changes below. + ( newly_joined_rooms, - newly_joined_or_invited_or_knocked_users, + newly_left_rooms, + ) = await self._generate_sync_entry_for_rooms( + sync_result_builder, account_data_by_room ) + # Work out which users have joined or left rooms we're in. We use this + # to build the presence and device_list parts of the sync response in + # `_generate_sync_entry_for_presence` and + # `_generate_sync_entry_for_device_list` respectively. + if include_presence_data or include_device_list_updates: + # This uses the sync_result_builder.joined which is set in + # `_generate_sync_entry_for_rooms`, if that didn't find any joined + # rooms for some reason it is a no-op. + ( + newly_joined_or_invited_or_knocked_users, + newly_left_users, + ) = sync_result_builder.calculate_user_changes() + + if include_presence_data: + logger.debug("Fetching presence data") + await self._generate_sync_entry_for_presence( + sync_result_builder, + newly_joined_rooms, + newly_joined_or_invited_or_knocked_users, + ) + + if include_device_list_updates: + device_lists = await self._generate_sync_entry_for_device_list( + sync_result_builder, + newly_joined_rooms=newly_joined_rooms, + newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, + newly_left_rooms=newly_left_rooms, + newly_left_users=newly_left_users, + ) + logger.debug("Fetching to-device data") await self._generate_sync_entry_for_to_device(sync_result_builder) - device_lists = await self._generate_sync_entry_for_device_list( - sync_result_builder, - newly_joined_rooms=newly_joined_rooms, - newly_joined_or_invited_or_knocked_users=newly_joined_or_invited_or_knocked_users, - newly_left_rooms=newly_left_rooms, - newly_left_users=newly_left_users, - ) - logger.debug("Fetching OTK data") device_id = sync_config.device_id one_time_keys_count: JsonDict = {} @@ -1551,6 +1577,7 @@ async def _generate_sync_entry_for_device_list( user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token + assert since_token is not None # Take a copy since these fields will be mutated later. newly_joined_or_invited_or_knocked_users = set( @@ -1558,92 +1585,85 @@ async def _generate_sync_entry_for_device_list( ) newly_left_users = set(newly_left_users) - if since_token and since_token.device_list_key: - # We want to figure out what user IDs the client should refetch - # device keys for, and which users we aren't going to track changes - # for anymore. - # - # For the first step we check: - # a. if any users we share a room with have updated their devices, - # and - # b. 
we also check if we've joined any new rooms, or if a user has - # joined a room we're in. - # - # For the second step we just find any users we no longer share a - # room with by looking at all users that have left a room plus users - # that were in a room we've left. + # We want to figure out what user IDs the client should refetch + # device keys for, and which users we aren't going to track changes + # for anymore. + # + # For the first step we check: + # a. if any users we share a room with have updated their devices, + # and + # b. we also check if we've joined any new rooms, or if a user has + # joined a room we're in. + # + # For the second step we just find any users we no longer share a + # room with by looking at all users that have left a room plus users + # that were in a room we've left. - users_that_have_changed = set() + users_that_have_changed = set() - joined_rooms = sync_result_builder.joined_room_ids + joined_rooms = sync_result_builder.joined_room_ids - # Step 1a, check for changes in devices of users we share a room - # with - # - # We do this in two different ways depending on what we have cached. - # If we already have a list of all the user that have changed since - # the last sync then it's likely more efficient to compare the rooms - # they're in with the rooms the syncing user is in. - # - # If we don't have that info cached then we get all the users that - # share a room with our user and check if those users have changed. - cache_result = self.store.get_cached_device_list_changes( - since_token.device_list_key - ) - if cache_result.hit: - changed_users = cache_result.entities - - result = await self.store.get_rooms_for_users(changed_users) - - for changed_user_id, entries in result.items(): - # Check if the changed user shares any rooms with the user, - # or if the changed user is the syncing user (as we always - # want to include device list updates of their own devices). - if user_id == changed_user_id or any( - rid in joined_rooms for rid in entries - ): - users_that_have_changed.add(changed_user_id) - else: - users_that_have_changed = ( - await self._device_handler.get_device_changes_in_shared_rooms( - user_id, - sync_result_builder.joined_room_ids, - from_token=since_token, - ) - ) - - # Step 1b, check for newly joined rooms - for room_id in newly_joined_rooms: - joined_users = await self.store.get_users_in_room(room_id) - newly_joined_or_invited_or_knocked_users.update(joined_users) + # Step 1a, check for changes in devices of users we share a room + # with + # + # We do this in two different ways depending on what we have cached. + # If we already have a list of all the user that have changed since + # the last sync then it's likely more efficient to compare the rooms + # they're in with the rooms the syncing user is in. + # + # If we don't have that info cached then we get all the users that + # share a room with our user and check if those users have changed. + cache_result = self.store.get_cached_device_list_changes( + since_token.device_list_key + ) + if cache_result.hit: + changed_users = cache_result.entities - # TODO: Check that these users are actually new, i.e. either they - # weren't in the previous sync *or* they left and rejoined. 
- users_that_have_changed.update(newly_joined_or_invited_or_knocked_users) + result = await self.store.get_rooms_for_users(changed_users) - user_signatures_changed = ( - await self.store.get_users_whose_signatures_changed( - user_id, since_token.device_list_key + for changed_user_id, entries in result.items(): + # Check if the changed user shares any rooms with the user, + # or if the changed user is the syncing user (as we always + # want to include device list updates of their own devices). + if user_id == changed_user_id or any( + rid in joined_rooms for rid in entries + ): + users_that_have_changed.add(changed_user_id) + else: + users_that_have_changed = ( + await self._device_handler.get_device_changes_in_shared_rooms( + user_id, + sync_result_builder.joined_room_ids, + from_token=since_token, ) ) - users_that_have_changed.update(user_signatures_changed) - # Now find users that we no longer track - for room_id in newly_left_rooms: - left_users = await self.store.get_users_in_room(room_id) - newly_left_users.update(left_users) + # Step 1b, check for newly joined rooms + for room_id in newly_joined_rooms: + joined_users = await self.store.get_users_in_room(room_id) + newly_joined_or_invited_or_knocked_users.update(joined_users) - # Remove any users that we still share a room with. - left_users_rooms = await self.store.get_rooms_for_users(newly_left_users) - for user_id, entries in left_users_rooms.items(): - if any(rid in joined_rooms for rid in entries): - newly_left_users.discard(user_id) + # TODO: Check that these users are actually new, i.e. either they + # weren't in the previous sync *or* they left and rejoined. + users_that_have_changed.update(newly_joined_or_invited_or_knocked_users) - return DeviceListUpdates( - changed=users_that_have_changed, left=newly_left_users - ) - else: - return DeviceListUpdates() + user_signatures_changed = await self.store.get_users_whose_signatures_changed( + user_id, since_token.device_list_key + ) + users_that_have_changed.update(user_signatures_changed) + + # Now find users that we no longer track + for room_id in newly_left_rooms: + left_users = await self.store.get_users_in_room(room_id) + newly_left_users.update(left_users) + + # Remove any users that we still share a room with. + left_users_rooms = await self.store.get_rooms_for_users(newly_left_users) + for user_id, entries in left_users_rooms.items(): + if any(rid in joined_rooms for rid in entries): + newly_left_users.discard(user_id) + + return DeviceListUpdates(changed=users_that_have_changed, left=newly_left_users) @trace async def _generate_sync_entry_for_to_device( @@ -1720,6 +1740,7 @@ async def _generate_sync_entry_for_account_data( since_token = sync_result_builder.since_token if since_token and not sync_result_builder.full_state: + # TODO Do not fetch room account data if it will be unused. ( global_account_data, account_data_by_room, @@ -1736,6 +1757,7 @@ async def _generate_sync_entry_for_account_data( sync_config.user ) else: + # TODO Do not fetch room account data if it will be unused. ( global_account_data, account_data_by_room, @@ -1818,7 +1840,7 @@ async def _generate_sync_entry_for_rooms( self, sync_result_builder: "SyncResultBuilder", account_data_by_room: Dict[str, Dict[str, JsonDict]], - ) -> Tuple[AbstractSet[str], AbstractSet[str], AbstractSet[str], AbstractSet[str]]: + ) -> Tuple[AbstractSet[str], AbstractSet[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. 
@@ -1831,24 +1853,22 @@ async def _generate_sync_entry_for_rooms( account_data_by_room: Dictionary of per room account data Returns: - Returns a 4-tuple describing rooms the user has joined or left, and users who've - joined or left rooms any rooms the user is in. This gets used later in - `_generate_sync_entry_for_device_list`. + Returns a 2-tuple describing rooms the user has joined or left. Its entries are: - newly_joined_rooms - - newly_joined_or_invited_or_knocked_users - newly_left_rooms - - newly_left_users """ since_token = sync_result_builder.since_token user_id = sync_result_builder.sync_config.user.to_string() # 1. Start by fetching all ephemeral events in rooms we've joined (if required). - if ( - sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() - ): + block_all_room_ephemeral = ( + sync_result_builder.sync_config.filter_collection.blocks_all_rooms() + or sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() + ) + if block_all_room_ephemeral: ephemeral_by_room: Dict[str, List[JsonDict]] = {} else: now_token, ephemeral_by_room = await self.ephemeral_by_room( @@ -1870,7 +1890,7 @@ async def _generate_sync_entry_for_rooms( ) if not tags_by_room: logger.debug("no-oping sync") - return set(), set(), set(), set() + return set(), set() # 3. Work out which rooms need reporting in the sync response. ignored_users = await self.store.ignored_users(user_id) @@ -1899,6 +1919,7 @@ async def _generate_sync_entry_for_rooms( # joined or archived). async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None: logger.debug("Generating room entry for %s", room_entry.room_id) + # Note that this mutates sync_result_builder.{joined,archived}. await self._generate_room_entry( sync_result_builder, room_entry, @@ -1915,20 +1936,7 @@ async def handle_room_entries(room_entry: "RoomSyncResultBuilder") -> None: sync_result_builder.invited.extend(invited) sync_result_builder.knocked.extend(knocked) - # 5. Work out which users have joined or left rooms we're in. We use this - # to build the device_list part of the sync response in - # `_generate_sync_entry_for_device_list`. - ( - newly_joined_or_invited_or_knocked_users, - newly_left_users, - ) = sync_result_builder.calculate_user_changes() - - return ( - set(newly_joined_rooms), - newly_joined_or_invited_or_knocked_users, - set(newly_left_rooms), - newly_left_users, - ) + return set(newly_joined_rooms), set(newly_left_rooms) async def _have_rooms_changed( self, sync_result_builder: "SyncResultBuilder" From 8e9fc28c6aff6bb1aa960dfde4f9736fee1ae4fb Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Feb 2023 08:27:31 -0500 Subject: [PATCH 112/344] Reload the pyo3-log config when the Python logging config changes. (#14976) Since pyo3-log is initialized very early in the Python start-up it caches the state of the loggers before they're fully initialized (and thus are essentially disabled). Whenever we reload the logging configuration we now also tell pyo3-log to discard any cached logging configuration it has; it will refetch the current logging configuration from Python at the next point it logs. This fixes Rust log lines not appearing in the homeserver logs. 
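In short, the fix is to clear pyo3-log's cached view of the Python loggers every time the logging configuration is (re)applied, so the Rust side refetches the real levels on its next log call. Below is a minimal sketch of that call pattern, assuming the `reset_logging_config` binding added by this patch; the `reload_logging` helper name is illustrative, not part of the patch:

```python
import logging.config

# Binding added by this patch: discards pyo3-log's cached logger state so the
# Rust side refetches the current Python logging configuration when it next logs.
from synapse.synapse_rust import reset_logging_config


def reload_logging(log_config: dict) -> None:
    """Hypothetical helper showing the intended (re)load sequence."""
    logging.config.dictConfig(log_config)
    # Without this call, pyo3-log keeps the (possibly still-disabled) logger
    # state it captured at import time, and Rust log lines never appear.
    reset_logging_config()
```

The same two steps (apply the dict config, then reset the pyo3-log cache) appear in `_load_logging_config` in the diff below.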
--- changelog.d/14976.bugfix | 1 + rust/src/lib.rs | 17 ++++++++-- stubs/synapse/synapse_rust/__init__.pyi | 1 + synapse/config/logger.py | 42 ++++++++++++++----------- tests/test_utils/logging_setup.py | 3 ++ 5 files changed, 44 insertions(+), 20 deletions(-) create mode 100644 changelog.d/14976.bugfix diff --git a/changelog.d/14976.bugfix b/changelog.d/14976.bugfix new file mode 100644 index 000000000000..0cde046c0e00 --- /dev/null +++ b/changelog.d/14976.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. diff --git a/rust/src/lib.rs b/rust/src/lib.rs index c7b60e58a73b..ce67f5861183 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -1,7 +1,13 @@ +use lazy_static::lazy_static; use pyo3::prelude::*; +use pyo3_log::ResetHandle; pub mod push; +lazy_static! { + static ref LOGGING_HANDLE: ResetHandle = pyo3_log::init(); +} + /// Returns the hash of all the rust source files at the time it was compiled. /// /// Used by python to detect if the rust library is outdated. @@ -17,13 +23,20 @@ fn sum_as_string(a: usize, b: usize) -> PyResult { Ok((a + b).to_string()) } +/// Reset the cached logging configuration of pyo3-log to pick up any changes +/// in the Python logging configuration. +/// +#[pyfunction] +fn reset_logging_config() { + LOGGING_HANDLE.reset(); +} + /// The entry point for defining the Python module. #[pymodule] fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> { - pyo3_log::init(); - m.add_function(wrap_pyfunction!(sum_as_string, m)?)?; m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?; + m.add_function(wrap_pyfunction!(reset_logging_config, m)?)?; push::register_module(py, m)?; diff --git a/stubs/synapse/synapse_rust/__init__.pyi b/stubs/synapse/synapse_rust/__init__.pyi index 8658d3138f89..d25c60910662 100644 --- a/stubs/synapse/synapse_rust/__init__.pyi +++ b/stubs/synapse/synapse_rust/__init__.pyi @@ -1,2 +1,3 @@ def sum_as_string(a: int, b: int) -> str: ... def get_rust_file_digest() -> str: ... +def reset_logging_config() -> None: ... diff --git a/synapse/config/logger.py b/synapse/config/logger.py index 5468b963a2c1..56db875b25b1 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -34,6 +34,7 @@ from synapse.logging.context import LoggingContextFilter from synapse.logging.filter import MetadataFilter +from synapse.synapse_rust import reset_logging_config from synapse.types import JsonDict from ..util import SYNAPSE_VERSION @@ -200,24 +201,6 @@ def _setup_stdlib_logging( """ Set up Python standard library logging. """ - if log_config_path is None: - log_format = ( - "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" - " - %(message)s" - ) - - logger = logging.getLogger("") - logger.setLevel(logging.INFO) - logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO) - - formatter = logging.Formatter(log_format) - - handler = logging.StreamHandler() - handler.setFormatter(formatter) - logger.addHandler(handler) - else: - # Load the logging configuration. - _load_logging_config(log_config_path) # We add a log record factory that runs all messages through the # LoggingContextFilter so that we get the context *at the time we log* @@ -237,6 +220,26 @@ def factory(*args: Any, **kwargs: Any) -> logging.LogRecord: logging.setLogRecordFactory(factory) + # Configure the logger with the initial configuration. 
+ if log_config_path is None: + log_format = ( + "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s" + " - %(message)s" + ) + + logger = logging.getLogger("") + logger.setLevel(logging.INFO) + logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO) + + formatter = logging.Formatter(log_format) + + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + else: + # Load the logging configuration. + _load_logging_config(log_config_path) + # Route Twisted's native logging through to the standard library logging # system. observer = STDLibLogObserver() @@ -294,6 +297,9 @@ def _load_logging_config(log_config_path: str) -> None: logging.config.dictConfig(log_config) + # Blow away the pyo3-log cache so that it reloads the configuration. + reset_logging_config() + def _reload_logging_config(log_config_path: Optional[str]) -> None: """ diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 9228454c9e78..304c7b98c5c9 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -17,6 +17,7 @@ import twisted.logger from synapse.logging.context import LoggingContextFilter +from synapse.synapse_rust import reset_logging_config class ToTwistedHandler(logging.Handler): @@ -52,3 +53,5 @@ def setup_logging(): log_level = os.environ.get("SYNAPSE_TEST_LOG_LEVEL", "ERROR") root_logger.setLevel(log_level) + + reset_logging_config() From 0a686d1d13c497af84f62ca192a401fdc18387ab Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 3 Feb 2023 15:39:59 +0000 Subject: [PATCH 113/344] Faster joins: Refactor handling of servers in room (#14954) Ensure that the list of servers in a partial state room always contains the server we joined off. Also refactor `get_partial_state_servers_at_join` to return `None` when the given room is no longer partial stated, to explicitly indicate when the room has partial state. Otherwise it's not clear whether an empty list means that the room has full state, or the room is partial stated, but the server we joined off told us that there are no servers in the room. Signed-off-by: Sean Quah --- changelog.d/14954.misc | 1 + synapse/federation/federation_client.py | 33 ++++++++++------ synapse/federation/sender/__init__.py | 2 +- synapse/handlers/device.py | 1 + synapse/handlers/federation.py | 20 +++++++--- synapse/storage/controllers/state.py | 3 +- synapse/storage/databases/main/room.py | 50 ++++++++++++++++--------- tests/handlers/test_federation.py | 2 +- tests/handlers/test_room_member.py | 2 +- 9 files changed, 77 insertions(+), 37 deletions(-) create mode 100644 changelog.d/14954.misc diff --git a/changelog.d/14954.misc b/changelog.d/14954.misc new file mode 100644 index 000000000000..b86b6bf01e24 --- /dev/null +++ b/changelog.d/14954.misc @@ -0,0 +1 @@ +Faster room joins: Refactor internal handling of servers in room to never store an empty list. 
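To make the refactored contract concrete, here is a hedged sketch of how a caller is now expected to interpret the return value; the `hosts_known_at_join` helper is illustrative (not from the patch) and `store` stands in for the main datastore:

```python
from typing import FrozenSet


async def hosts_known_at_join(store, room_id: str) -> FrozenSet[str]:
    """Hypothetical helper: interpret the new Optional return value."""
    hosts_at_join = await store.get_partial_state_servers_at_join(room_id)
    if hosts_at_join is None:
        # The room has full state: there is no remote-supplied server list.
        return frozenset()
    # Partial state: a non-empty set that, per this patch, also includes the
    # server we joined off, even if the remote listed no servers at all.
    return frozenset(hosts_at_join)
```

The storage-controller hunk later in this patch performs exactly this None-to-`frozenset()` fallback.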
diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 8493ffc2e5ee..0ac85a3be717 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -19,6 +19,7 @@ import logging from typing import ( TYPE_CHECKING, + AbstractSet, Awaitable, Callable, Collection, @@ -110,8 +111,9 @@ class SendJoinResult: # True if 'state' elides non-critical membership events partial_state: bool - # if 'partial_state' is set, a list of the servers in the room (otherwise empty) - servers_in_room: List[str] + # If 'partial_state' is set, a set of the servers in the room (otherwise empty). + # Always contains the server we joined off. + servers_in_room: AbstractSet[str] class FederationClient(FederationBase): @@ -1152,15 +1154,24 @@ async def _execute(pdu: EventBase) -> None: % (auth_chain_create_events,) ) - if response.members_omitted and not response.servers_in_room: - raise InvalidResponseError( - "members_omitted was set, but no servers were listed in the room" - ) + servers_in_room = None + if response.servers_in_room is not None: + servers_in_room = set(response.servers_in_room) - if response.members_omitted and not partial_state: - raise InvalidResponseError( - "members_omitted was set, but we asked for full state" - ) + if response.members_omitted: + if not servers_in_room: + raise InvalidResponseError( + "members_omitted was set, but no servers were listed in the room" + ) + + if not partial_state: + raise InvalidResponseError( + "members_omitted was set, but we asked for full state" + ) + + # `servers_in_room` is supposed to be a complete list. + # Fix things up in case the remote homeserver is badly behaved. + servers_in_room.add(destination) return SendJoinResult( event=event, @@ -1168,7 +1179,7 @@ async def _execute(pdu: EventBase) -> None: auth_chain=signed_auth, origin=destination, partial_state=response.members_omitted, - servers_in_room=response.servers_in_room or [], + servers_in_room=servers_in_room or frozenset(), ) # MSC3083 defines additional error codes for room joins. 
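Pulled out of diff form for reference, the checks above on a partial-state `/send_join` response amount to roughly the following. This is a sketch, not the actual parser: `ValueError` stands in for `InvalidResponseError`, and the function and argument names are illustrative.

```python
from typing import AbstractSet, List, Optional


def normalise_servers_in_room(
    members_omitted: bool,
    servers_in_room: Optional[List[str]],
    asked_for_partial_state: bool,
    destination: str,
) -> AbstractSet[str]:
    """Hypothetical restatement of the /send_join response checks."""
    servers = set(servers_in_room) if servers_in_room is not None else None
    if members_omitted:
        if not servers:
            raise ValueError("members_omitted was set, but no servers were listed")
        if not asked_for_partial_state:
            raise ValueError("members_omitted was set, but we asked for full state")
        # The list is supposed to be complete; add the server we joined off in
        # case the remote homeserver is badly behaved.
        servers.add(destination)
    return servers or frozenset()
```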
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 30ebd62883b1..43421a9c727b 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -447,7 +447,7 @@ async def handle_event(event: EventBase) -> None: ) ) - if len(partial_state_destinations) > 0: + if partial_state_destinations is not None: destinations = partial_state_destinations if destinations is None: diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 5c0607390137..6f7963df43ae 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -859,6 +859,7 @@ async def handle_room_un_partial_stated(self, room_id: str) -> None: known_hosts_at_join = await self.store.get_partial_state_servers_at_join( room_id ) + assert known_hosts_at_join is not None potentially_changed_hosts.difference_update(known_hosts_at_join) potentially_changed_hosts.discard(self.server_name) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index dc1cbf5c3d13..7f64130e0aa1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -20,7 +20,17 @@ import logging from enum import Enum from http import HTTPStatus -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Set, Tuple, Union +from typing import ( + TYPE_CHECKING, + AbstractSet, + Dict, + Iterable, + List, + Optional, + Set, + Tuple, + Union, +) import attr from prometheus_client import Histogram @@ -169,7 +179,7 @@ def __init__(self, hs: "HomeServer"): # A dictionary mapping room IDs to (initial destination, other destinations) # tuples. self._partial_state_syncs_maybe_needing_restart: Dict[ - str, Tuple[Optional[str], StrCollection] + str, Tuple[Optional[str], AbstractSet[str]] ] = {} # A lock guarding the partial state flag for rooms. # When the lock is held for a given room, no other concurrent code may @@ -1720,7 +1730,7 @@ async def _resume_partial_state_room_sync(self) -> None: def _start_partial_state_room_sync( self, initial_destination: Optional[str], - other_destinations: StrCollection, + other_destinations: AbstractSet[str], room_id: str, ) -> None: """Starts the background process to resync the state of a partial state room, @@ -1802,7 +1812,7 @@ async def _sync_partial_state_room_wrapper() -> None: async def _sync_partial_state_room( self, initial_destination: Optional[str], - other_destinations: StrCollection, + other_destinations: AbstractSet[str], room_id: str, ) -> None: """Background process to resync the state of a partial-state room @@ -1939,7 +1949,7 @@ async def _sync_partial_state_room( def _prioritise_destinations_for_partial_state_resync( initial_destination: Optional[str], - other_destinations: StrCollection, + other_destinations: AbstractSet[str], room_id: str, ) -> StrCollection: """Work out the order in which we should ask servers to resync events. diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 2045169b9a5d..52efd4a1719b 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -569,10 +569,11 @@ async def get_current_hosts_in_room_or_partial_state_approximation( is arbitrary for rooms with partial state. """ # We have to read this list first to mitigate races with un-partial stating. - # This will be empty for rooms with full state. 
hosts_at_join = await self.stores.main.get_partial_state_servers_at_join( room_id ) + if hosts_at_join is None: + hosts_at_join = frozenset() hosts_from_state = await self.stores.main.get_current_hosts_in_room(room_id) diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 4ddb27f686df..644bbb88788f 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -18,6 +18,7 @@ from enum import Enum from typing import ( TYPE_CHECKING, + AbstractSet, Any, Awaitable, Collection, @@ -25,7 +26,6 @@ List, Mapping, Optional, - Sequence, Set, Tuple, Union, @@ -109,7 +109,7 @@ class RoomSortOrder(Enum): @attr.s(slots=True, frozen=True, auto_attribs=True) class PartialStateResyncInfo: joined_via: Optional[str] - servers_in_room: List[str] = attr.ib(factory=list) + servers_in_room: Set[str] = attr.ib(factory=set) class RoomWorkerStore(CacheInvalidationWorkerStore): @@ -1193,21 +1193,35 @@ def get_rooms_for_retention_period_in_range_txn( get_rooms_for_retention_period_in_range_txn, ) - @cached(iterable=True) - async def get_partial_state_servers_at_join(self, room_id: str) -> Sequence[str]: - """Gets the list of servers in a partial state room at the time we joined it. + async def get_partial_state_servers_at_join( + self, room_id: str + ) -> Optional[AbstractSet[str]]: + """Gets the set of servers in a partial state room at the time we joined it. Returns: The `servers_in_room` list from the `/send_join` response for partial state rooms. May not be accurate or complete, as it comes from a remote homeserver. - An empty list for full state rooms. + `None` for full state rooms. """ - return await self.db_pool.simple_select_onecol( - "partial_state_rooms_servers", - keyvalues={"room_id": room_id}, - retcol="server_name", - desc="get_partial_state_servers_at_join", + servers_in_room = await self._get_partial_state_servers_at_join(room_id) + + if len(servers_in_room) == 0: + return None + + return servers_in_room + + @cached(iterable=True) + async def _get_partial_state_servers_at_join( + self, room_id: str + ) -> AbstractSet[str]: + return frozenset( + await self.db_pool.simple_select_onecol( + "partial_state_rooms_servers", + keyvalues={"room_id": room_id}, + retcol="server_name", + desc="get_partial_state_servers_at_join", + ) ) async def get_partial_state_room_resync_info( @@ -1252,7 +1266,7 @@ async def get_partial_state_room_resync_info( # partial-joined between the two SELECTs, but this is unlikely to happen # in practice.) continue - entry.servers_in_room.append(server_name) + entry.servers_in_room.add(server_name) return room_servers @@ -1942,7 +1956,7 @@ async def upsert_room_on_join( async def store_partial_state_room( self, room_id: str, - servers: Collection[str], + servers: AbstractSet[str], device_lists_stream_id: int, joined_via: str, ) -> None: @@ -1957,11 +1971,13 @@ async def store_partial_state_room( Args: room_id: the ID of the room - servers: other servers known to be in the room + servers: other servers known to be in the room. must include `joined_via`. device_lists_stream_id: the device_lists stream ID at the time when we first joined the room. joined_via: the server name we requested a partial join from. 
""" + assert joined_via in servers + await self.db_pool.runInteraction( "store_partial_state_room", self._store_partial_state_room_txn, @@ -1975,7 +1991,7 @@ def _store_partial_state_room_txn( self, txn: LoggingTransaction, room_id: str, - servers: Collection[str], + servers: AbstractSet[str], device_lists_stream_id: int, joined_via: str, ) -> None: @@ -1998,7 +2014,7 @@ def _store_partial_state_room_txn( ) self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,)) self._invalidate_cache_and_stream( - txn, self.get_partial_state_servers_at_join, (room_id,) + txn, self._get_partial_state_servers_at_join, (room_id,) ) async def write_partial_state_rooms_join_event_id( @@ -2409,7 +2425,7 @@ def _clear_partial_state_room_txn( ) self._invalidate_cache_and_stream(txn, self.is_partial_state_room, (room_id,)) self._invalidate_cache_and_stream( - txn, self.get_partial_state_servers_at_join, (room_id,) + txn, self._get_partial_state_servers_at_join, (room_id,) ) DatabasePool.simple_insert_txn( diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index c1558c40c370..57675fa407e4 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -656,7 +656,7 @@ def test_failed_partial_join_is_clean(self) -> None: EVENT_INVITATION_MEMBERSHIP, ], partial_state=True, - servers_in_room=["example.com"], + servers_in_room={"example.com"}, ) ) ) diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 6bbfd5dc843f..6a38893b688a 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -171,7 +171,7 @@ def test_remote_joins_contribute_to_rate_limit(self) -> None: state=[create_event], auth_chain=[create_event], partial_state=False, - servers_in_room=[], + servers_in_room=frozenset(), ) ) ) From 52700a0bcf2caaa792b94e2a8c12f29d1c61b91e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Feb 2023 11:28:20 -0500 Subject: [PATCH 114/344] Support the backwards compatibility features in MSC3952. (#14958) If the feature is enabled and the event has a `m.mentions` property, skip processing of the legacy mentions rules. --- changelog.d/14958.feature | 1 + rust/benches/evaluator.rs | 4 + rust/src/push/evaluator.rs | 19 ++ stubs/synapse/synapse_rust/push.pyi | 1 + synapse/push/bulk_push_rule_evaluator.py | 9 +- tests/push/test_bulk_push_rule_evaluator.py | 191 ++++++++++++++------ tests/push/test_push_rule_evaluator.py | 18 +- 7 files changed, 184 insertions(+), 59 deletions(-) create mode 100644 changelog.d/14958.feature diff --git a/changelog.d/14958.feature b/changelog.d/14958.feature new file mode 100644 index 000000000000..8293e99effbc --- /dev/null +++ b/changelog.d/14958.feature @@ -0,0 +1 @@ +Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. 
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 6b16a3f75b75..859d54961cad 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -33,6 +33,7 @@ fn bench_match_exact(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + false, BTreeSet::new(), false, 10, @@ -71,6 +72,7 @@ fn bench_match_word(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + false, BTreeSet::new(), false, 10, @@ -109,6 +111,7 @@ fn bench_match_word_miss(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + false, BTreeSet::new(), false, 10, @@ -147,6 +150,7 @@ fn bench_eval_message(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, + false, BTreeSet::new(), false, 10, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index aa71202e4379..da6f704c0ec6 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -68,6 +68,8 @@ pub struct PushRuleEvaluator { /// The "content.body", if any. body: String, + /// True if the event has a mentions property and MSC3952 support is enabled. + has_mentions: bool, /// The user mentions that were part of the message. user_mentions: BTreeSet, /// True if the message is a room message. @@ -105,6 +107,7 @@ impl PushRuleEvaluator { #[new] pub fn py_new( flattened_keys: BTreeMap, + has_mentions: bool, user_mentions: BTreeSet, room_mention: bool, room_member_count: u64, @@ -123,6 +126,7 @@ impl PushRuleEvaluator { Ok(PushRuleEvaluator { flattened_keys, body, + has_mentions, user_mentions, room_mention, room_member_count, @@ -155,6 +159,19 @@ impl PushRuleEvaluator { } let rule_id = &push_rule.rule_id().to_string(); + + // For backwards-compatibility the legacy mention rules are disabled + // if the event contains the 'm.mentions' property (and if the + // experimental feature is enabled, both of these are represented + // by the has_mentions flag). 
+ if self.has_mentions + && (rule_id == "global/override/.m.rule.contains_display_name" + || rule_id == "global/content/.m.rule.contains_user_name" + || rule_id == "global/override/.m.rule.roomnotif") + { + continue; + } + let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string(); let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag); let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id); @@ -441,6 +458,7 @@ fn push_rule_evaluator() { flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); let evaluator = PushRuleEvaluator::py_new( flattened_keys, + false, BTreeSet::new(), false, 10, @@ -468,6 +486,7 @@ fn test_requires_room_version_supports_condition() { let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; let evaluator = PushRuleEvaluator::py_new( flattened_keys, + false, BTreeSet::new(), false, 10, diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 588d90c25a82..c0af2af3df1f 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -56,6 +56,7 @@ class PushRuleEvaluator: def __init__( self, flattened_keys: Mapping[str, str], + has_mentions: bool, user_mentions: Set[str], room_mention: bool, room_member_count: int, diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 88cfc05d0552..9bf92b9765e4 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -119,6 +119,9 @@ def __init__(self, hs: "HomeServer"): self.should_calculate_push_rules = self.hs.config.push.enable_push self._related_event_match_enabled = self.hs.config.experimental.msc3664_enabled + self._intentional_mentions_enabled = ( + self.hs.config.experimental.msc3952_intentional_mentions + ) self.room_push_rule_cache_metrics = register_cache( "cache", @@ -364,9 +367,12 @@ async def _action_for_event_by_user( # Pull out any user and room mentions. mentions = event.content.get(EventContentFields.MSC3952_MENTIONS) + has_mentions = self._intentional_mentions_enabled and isinstance(mentions, dict) user_mentions: Set[str] = set() room_mention = False - if isinstance(mentions, dict): + if has_mentions: + # mypy seems to have lost the type even though it must be a dict here. + assert isinstance(mentions, dict) # Remove out any non-string items and convert to a set. user_mentions_raw = mentions.get("user_ids") if isinstance(user_mentions_raw, list): @@ -378,6 +384,7 @@ async def _action_for_event_by_user( evaluator = PushRuleEvaluator( _flatten_dict(event, room_version=event.room_version), + has_mentions, user_mentions, room_mention, room_member_count, diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index fda48d9f61db..3b2d082dcb2b 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any +from typing import Any, Optional from unittest.mock import patch from parameterized import parameterized @@ -25,7 +25,7 @@ from synapse.rest import admin from synapse.rest.client import login, register, room from synapse.server import HomeServer -from synapse.types import create_requester +from synapse.types import JsonDict, create_requester from synapse.util import Clock from tests.test_utils import simple_async_mock @@ -196,77 +196,144 @@ def test_action_for_event_by_user_disabled_by_config(self) -> None: self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) bulk_evaluator._action_for_event_by_user.assert_not_called() - @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) - def test_mentions(self) -> None: - """Test the behavior of an event which includes invalid mentions.""" - bulk_evaluator = BulkPushRuleEvaluator(self.hs) - - sentinel = object() - - def create_and_process(mentions: Any = sentinel) -> bool: - """Returns true iff the `mentions` trigger an event push action.""" - content = {} - if mentions is not sentinel: - content[EventContentFields.MSC3952_MENTIONS] = mentions - - # Create a new message event which should cause a notification. - event, context = self.get_success( - self.event_creation_handler.create_event( - self.requester, - { - "type": "test", - "room_id": self.room_id, - "content": content, - "sender": f"@bob:{self.hs.hostname}", - }, - ) + def _create_and_process( + self, bulk_evaluator: BulkPushRuleEvaluator, content: Optional[JsonDict] = None + ) -> bool: + """Returns true iff the `mentions` trigger an event push action.""" + # Create a new message event which should cause a notification. + event, context = self.get_success( + self.event_creation_handler.create_event( + self.requester, + { + "type": "test", + "room_id": self.room_id, + "content": content or {}, + "sender": f"@bob:{self.hs.hostname}", + }, ) + ) - # Ensure no actions are generated! - self.get_success( - bulk_evaluator.action_for_events_by_user([(event, context)]) - ) + # Execute the push rule machinery. + self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) - # If any actions are generated for this event, return true. - result = self.get_success( - self.hs.get_datastores().main.db_pool.simple_select_list( - table="event_push_actions_staging", - keyvalues={"event_id": event.event_id}, - retcols=("*",), - desc="get_event_push_actions_staging", - ) + # If any actions are generated for this event, return true. + result = self.get_success( + self.hs.get_datastores().main.db_pool.simple_select_list( + table="event_push_actions_staging", + keyvalues={"event_id": event.event_id}, + retcols=("*",), + desc="get_event_push_actions_staging", ) - return len(result) > 0 + ) + return len(result) > 0 + + @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) + def test_user_mentions(self) -> None: + """Test the behavior of an event which includes invalid user mentions.""" + bulk_evaluator = BulkPushRuleEvaluator(self.hs) # Not including the mentions field should not notify. - self.assertFalse(create_and_process()) + self.assertFalse(self._create_and_process(bulk_evaluator)) # An empty mentions field should not notify. - self.assertFalse(create_and_process({})) + self.assertFalse( + self._create_and_process( + bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {}} + ) + ) # Non-dict mentions should be ignored. 
mentions: Any for mentions in (None, True, False, 1, "foo", []): - self.assertFalse(create_and_process(mentions)) + self.assertFalse( + self._create_and_process( + bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: mentions} + ) + ) # A non-list should be ignored. for mentions in (None, True, False, 1, "foo", {}): - self.assertFalse(create_and_process({"user_ids": mentions})) + self.assertFalse( + self._create_and_process( + bulk_evaluator, + {EventContentFields.MSC3952_MENTIONS: {"user_ids": mentions}}, + ) + ) # The Matrix ID appearing anywhere in the list should notify. - self.assertTrue(create_and_process({"user_ids": [self.alice]})) - self.assertTrue(create_and_process({"user_ids": ["@another:test", self.alice]})) + self.assertTrue( + self._create_and_process( + bulk_evaluator, + {EventContentFields.MSC3952_MENTIONS: {"user_ids": [self.alice]}}, + ) + ) + self.assertTrue( + self._create_and_process( + bulk_evaluator, + { + EventContentFields.MSC3952_MENTIONS: { + "user_ids": ["@another:test", self.alice] + } + }, + ) + ) # Duplicate user IDs should notify. - self.assertTrue(create_and_process({"user_ids": [self.alice, self.alice]})) + self.assertTrue( + self._create_and_process( + bulk_evaluator, + { + EventContentFields.MSC3952_MENTIONS: { + "user_ids": [self.alice, self.alice] + } + }, + ) + ) # Invalid entries in the list are ignored. - self.assertFalse(create_and_process({"user_ids": [None, True, False, {}, []]})) + self.assertFalse( + self._create_and_process( + bulk_evaluator, + { + EventContentFields.MSC3952_MENTIONS: { + "user_ids": [None, True, False, {}, []] + } + }, + ) + ) self.assertTrue( - create_and_process({"user_ids": [None, True, False, {}, [], self.alice]}) + self._create_and_process( + bulk_evaluator, + { + EventContentFields.MSC3952_MENTIONS: { + "user_ids": [None, True, False, {}, [], self.alice] + } + }, + ) ) + # The legacy push rule should not mention if the mentions field exists. + self.assertFalse( + self._create_and_process( + bulk_evaluator, + { + "body": self.alice, + "msgtype": "m.text", + EventContentFields.MSC3952_MENTIONS: {}, + }, + ) + ) + + @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) + def test_room_mentions(self) -> None: + """Test the behavior of an event which includes invalid room mentions.""" + bulk_evaluator = BulkPushRuleEvaluator(self.hs) + # Room mentions from those without power should not notify. - self.assertFalse(create_and_process({"room": True})) + self.assertFalse( + self._create_and_process( + bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}} + ) + ) # Room mentions from those with power should notify. self.helper.send_state( @@ -276,8 +343,30 @@ def create_and_process(mentions: Any = sentinel) -> bool: self.token, state_key="", ) - self.assertTrue(create_and_process({"room": True})) + self.assertTrue( + self._create_and_process( + bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: {"room": True}} + ) + ) # Invalid data should not notify. + mentions: Any for mentions in (None, False, 1, "foo", [], {}): - self.assertFalse(create_and_process({"room": mentions})) + self.assertFalse( + self._create_and_process( + bulk_evaluator, + {EventContentFields.MSC3952_MENTIONS: {"room": mentions}}, + ) + ) + + # The legacy push rule should not mention if the mentions field exists. 
+ self.assertFalse( + self._create_and_process( + bulk_evaluator, + { + "body": "@room", + "msgtype": "m.text", + EventContentFields.MSC3952_MENTIONS: {}, + }, + ) + ) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 9d01c989d408..81661e181bf9 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -42,6 +42,7 @@ def _get_evaluator( self, content: JsonMapping, *, + has_mentions: bool = False, user_mentions: Optional[Set[str]] = None, room_mention: bool = False, related_events: Optional[JsonDict] = None, @@ -62,6 +63,7 @@ def _get_evaluator( power_levels: Dict[str, Union[int, Dict[str, int]]] = {} return PushRuleEvaluator( _flatten_dict(event), + has_mentions, user_mentions or set(), room_mention, room_member_count, @@ -102,19 +104,21 @@ def test_user_mentions(self) -> None: condition = {"kind": "org.matrix.msc3952.is_user_mention"} # No mentions shouldn't match. - evaluator = self._get_evaluator({}) + evaluator = self._get_evaluator({}, has_mentions=True) self.assertFalse(evaluator.matches(condition, "@user:test", None)) # An empty set shouldn't match - evaluator = self._get_evaluator({}, user_mentions=set()) + evaluator = self._get_evaluator({}, has_mentions=True, user_mentions=set()) self.assertFalse(evaluator.matches(condition, "@user:test", None)) # The Matrix ID appearing anywhere in the mentions list should match - evaluator = self._get_evaluator({}, user_mentions={"@user:test"}) + evaluator = self._get_evaluator( + {}, has_mentions=True, user_mentions={"@user:test"} + ) self.assertTrue(evaluator.matches(condition, "@user:test", None)) evaluator = self._get_evaluator( - {}, user_mentions={"@another:test", "@user:test"} + {}, has_mentions=True, user_mentions={"@another:test", "@user:test"} ) self.assertTrue(evaluator.matches(condition, "@user:test", None)) @@ -126,16 +130,16 @@ def test_room_mentions(self) -> None: condition = {"kind": "org.matrix.msc3952.is_room_mention"} # No room mention shouldn't match. - evaluator = self._get_evaluator({}) + evaluator = self._get_evaluator({}, has_mentions=True) self.assertFalse(evaluator.matches(condition, None, None)) # Room mention should match. - evaluator = self._get_evaluator({}, room_mention=True) + evaluator = self._get_evaluator({}, has_mentions=True, room_mention=True) self.assertTrue(evaluator.matches(condition, None, None)) # A room mention and user mention is valid. evaluator = self._get_evaluator( - {}, user_mentions={"@another:test"}, room_mention=True + {}, has_mentions=True, user_mentions={"@another:test"}, room_mention=True ) self.assertTrue(evaluator.matches(condition, None, None)) From f0cae26d58f6f907236112be5f4eaecc376b1304 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Feb 2023 11:48:13 -0500 Subject: [PATCH 115/344] Add a docstring & tests for _flatten_dict. (#14981) --- changelog.d/14981.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 23 +++++++++++++++++++++ tests/push/test_push_rule_evaluator.py | 26 +++++++++++++++++++++++- 3 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 changelog.d/14981.misc diff --git a/changelog.d/14981.misc b/changelog.d/14981.misc new file mode 100644 index 000000000000..68ac8335fc4c --- /dev/null +++ b/changelog.d/14981.misc @@ -0,0 +1 @@ +Add tests for `_flatten_dict`. 
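As background for the docstring added below: the dotted keys produced by `_flatten_dict` are what push-rule `event_match` conditions are matched against. A small illustrative call (values are made up):

```python
from synapse.push.bulk_push_rule_evaluator import _flatten_dict

flattened = _flatten_dict(
    {"content": {"body": "hi", "m.relates_to": {"rel_type": "m.replace"}}}
)
# {"content.body": "hi", "content.m.relates_to.rel_type": "m.replace"}
# i.e. the same dotted form that event_match conditions use in their `key`
# field (for example "content.body").
```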
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 9bf92b9765e4..20369f3dfeee 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -473,6 +473,29 @@ def _flatten_dict( prefix: Optional[List[str]] = None, result: Optional[Dict[str, str]] = None, ) -> Dict[str, str]: + """ + Given a JSON dictionary (or event) which might contain sub dictionaries, + flatten it into a single layer dictionary by combining the keys & sub-keys. + + Any (non-dictionary), non-string value is dropped. + + Transforms: + + {"foo": {"bar": "test"}} + + To: + + {"foo.bar": "test"} + + Args: + d: The event or content to continue flattening. + room_version: The room version object. + prefix: The key prefix (from outer dictionaries). + result: The result to mutate. + + Returns: + The resulting dictionary. + """ if prefix is None: prefix = [] if result is None: diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 81661e181bf9..7c430c4ecbd8 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Dict, List, Optional, Set, Union, cast +from typing import Any, Dict, List, Optional, Set, Union, cast import frozendict @@ -37,6 +37,30 @@ from tests.test_utils.event_injection import create_event, inject_member_event +class FlattenDictTestCase(unittest.TestCase): + def test_simple(self) -> None: + """Test a dictionary that isn't modified.""" + input = {"foo": "abc"} + self.assertEqual(input, _flatten_dict(input)) + + def test_nested(self) -> None: + """Nested dictionaries become dotted paths.""" + input = {"foo": {"bar": "abc"}} + self.assertEqual({"foo.bar": "abc"}, _flatten_dict(input)) + + def test_non_string(self) -> None: + """Non-string items are dropped.""" + input: Dict[str, Any] = { + "woo": "woo", + "foo": True, + "bar": 1, + "baz": None, + "fuzz": [], + "boo": {}, + } + self.assertEqual({"woo": "woo"}, _flatten_dict(input)) + + class PushRuleEvaluatorTestCase(unittest.TestCase): def _get_evaluator( self, From e301ee6189f9b24ecf4ff5a2eb83529d3d32dc98 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 3 Feb 2023 19:22:40 +0000 Subject: [PATCH 116/344] Properly typecheck tests.app (#14984 --- changelog.d/14984.misc | 1 + mypy.ini | 4 +++- tests/app/test_homeserver_start.py | 2 +- tests/app/test_openid_listener.py | 16 +++++++++++----- tests/app/test_phone_stats_home.py | 21 +++++++++++++-------- 5 files changed, 29 insertions(+), 15 deletions(-) create mode 100644 changelog.d/14984.misc diff --git a/changelog.d/14984.misc b/changelog.d/14984.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14984.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index 978d92940b20..57f43395bbb5 100644 --- a/mypy.ini +++ b/mypy.ini @@ -33,7 +33,6 @@ exclude = (?x) |synapse/storage/schema/ |tests/api/test_auth.py - |tests/app/test_openid_listener.py |tests/appservice/test_scheduler.py |tests/federation/test_federation_catch_up.py |tests/federation/test_federation_sender.py @@ -74,6 +73,9 @@ disallow_untyped_defs = False [mypy-tests.*] disallow_untyped_defs = False +[mypy-tests.app.*] +disallow_untyped_defs = True + [mypy-tests.config.*] disallow_untyped_defs = True diff --git a/tests/app/test_homeserver_start.py b/tests/app/test_homeserver_start.py index cbcada04517e..788c93553715 100644 --- a/tests/app/test_homeserver_start.py +++ b/tests/app/test_homeserver_start.py @@ -19,7 +19,7 @@ class HomeserverAppStartTestCase(ConfigFileTestCase): - def test_wrong_start_caught(self): + def test_wrong_start_caught(self) -> None: # Generate a config with a worker_app self.generate_config() # Add a blank line as otherwise the next addition ends up on a line with a comment diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 8d03da7f96a6..5d89ba94ad8f 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -11,26 +11,32 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import List from unittest.mock import Mock, patch from parameterized import parameterized +from twisted.test.proto_helpers import MemoryReactor + from synapse.app.generic_worker import GenericWorkerServer from synapse.app.homeserver import SynapseHomeServer from synapse.config.server import parse_listener_def +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests.server import make_request from tests.unittest import HomeserverTestCase class FederationReaderOpenIDListenerTests(HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( federation_http_client=None, homeserver_to_use=GenericWorkerServer ) return hs - def default_config(self): + def default_config(self) -> JsonDict: conf = super().default_config() # we're using FederationReaderServer, which uses a SlavedStore, so we # have to tell the FederationHandler not to try to access stuff that is only @@ -47,7 +53,7 @@ def default_config(self): (["openid"], "auth_fail"), ] ) - def test_openid_listener(self, names, expectation): + def test_openid_listener(self, names: List[str], expectation: str) -> None: """ Test different openid listener configurations. @@ -81,7 +87,7 @@ def test_openid_listener(self, names, expectation): @patch("synapse.app.homeserver.KeyResource", new=Mock()) class SynapseHomeserverOpenIDListenerTests(HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( federation_http_client=None, homeserver_to_use=SynapseHomeServer ) @@ -95,7 +101,7 @@ def make_homeserver(self, reactor, clock): (["openid"], "auth_fail"), ] ) - def test_openid_listener(self, names, expectation): + def test_openid_listener(self, names: List[str], expectation: str) -> None: """ Test different openid listener configurations. 
diff --git a/tests/app/test_phone_stats_home.py b/tests/app/test_phone_stats_home.py index df731eb599cc..a860eedbcfef 100644 --- a/tests/app/test_phone_stats_home.py +++ b/tests/app/test_phone_stats_home.py @@ -1,8 +1,11 @@ import synapse from synapse.app.phone_stats_home import start_phone_stats_home from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest +from tests.server import ThreadedMemoryReactorClock from tests.unittest import HomeserverTestCase FIVE_MINUTES_IN_SECONDS = 300 @@ -19,7 +22,7 @@ class PhoneHomeTestCase(HomeserverTestCase): # Override the retention time for the user_ips table because otherwise it # gets pruned too aggressively for our R30 test. @unittest.override_config({"user_ips_max_age": "365d"}) - def test_r30_minimum_usage(self): + def test_r30_minimum_usage(self) -> None: """ Tests the minimum amount of interaction necessary for the R30 metric to consider a user 'retained'. @@ -68,7 +71,7 @@ def test_r30_minimum_usage(self): r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) self.assertEqual(r30_results, {"all": 0}) - def test_r30_minimum_usage_using_default_config(self): + def test_r30_minimum_usage_using_default_config(self) -> None: """ Tests the minimum amount of interaction necessary for the R30 metric to consider a user 'retained'. @@ -122,7 +125,7 @@ def test_r30_minimum_usage_using_default_config(self): r30_results = self.get_success(self.hs.get_datastores().main.count_r30_users()) self.assertEqual(r30_results, {"all": 0}) - def test_r30_user_must_be_retained_for_at_least_a_month(self): + def test_r30_user_must_be_retained_for_at_least_a_month(self) -> None: """ Tests that a newly-registered user must be retained for a whole month before appearing in the R30 statistic, even if they post every day @@ -164,12 +167,14 @@ class PhoneHomeR30V2TestCase(HomeserverTestCase): login.register_servlets, ] - def _advance_to(self, desired_time_secs: float): + def _advance_to(self, desired_time_secs: float) -> None: now = self.hs.get_clock().time() assert now < desired_time_secs self.reactor.advance(desired_time_secs - now) - def make_homeserver(self, reactor, clock): + def make_homeserver( + self, reactor: ThreadedMemoryReactorClock, clock: Clock + ) -> HomeServer: hs = super(PhoneHomeR30V2TestCase, self).make_homeserver(reactor, clock) # We don't want our tests to actually report statistics, so check @@ -181,7 +186,7 @@ def make_homeserver(self, reactor, clock): start_phone_stats_home(hs) return hs - def test_r30v2_minimum_usage(self): + def test_r30v2_minimum_usage(self) -> None: """ Tests the minimum amount of interaction necessary for the R30v2 metric to consider a user 'retained'. 
@@ -250,7 +255,7 @@ def test_r30v2_minimum_usage(self): r30_results, {"all": 0, "android": 0, "electron": 0, "ios": 0, "web": 0} ) - def test_r30v2_user_must_be_retained_for_at_least_a_month(self): + def test_r30v2_user_must_be_retained_for_at_least_a_month(self) -> None: """ Tests that a newly-registered user must be retained for a whole month before appearing in the R30v2 statistic, even if they post every day @@ -316,7 +321,7 @@ def test_r30v2_user_must_be_retained_for_at_least_a_month(self): r30_results, {"all": 1, "android": 1, "electron": 0, "ios": 0, "web": 0} ) - def test_r30v2_returning_dormant_users_not_counted(self): + def test_r30v2_returning_dormant_users_not_counted(self) -> None: """ Tests that dormant users (users inactive for a long time) do not contribute to R30v2 when they return for just a single day. From b2d97bac0910c4730ea83fbee50abbdce2ba23be Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Feb 2023 14:31:14 -0500 Subject: [PATCH 117/344] Implement MSC3958: suppress notifications from edits (#14960) Co-authored-by: Brad Murray Co-authored-by: Nick Barrett Copy the suppress_edits push rule from Beeper to implement MSC3958. https://github.com/beeper/synapse/blame/9415a1284b1bfb558bd66f28c24ca1611e6c6fa2/rust/src/push/base_rules.rs#L98-L114 --- changelog.d/14960.feature | 1 + rust/benches/evaluator.rs | 1 + rust/src/push/base_rules.rs | 17 +++++++++ rust/src/push/evaluator.rs | 2 +- rust/src/push/mod.rs | 8 ++++ stubs/synapse/synapse_rust/push.pyi | 1 + synapse/config/experimental.py | 5 +++ synapse/storage/databases/main/push_rule.py | 1 + tests/push/test_bulk_push_rule_evaluator.py | 42 ++++++++++++++++++++- 9 files changed, 76 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14960.feature diff --git a/changelog.d/14960.feature b/changelog.d/14960.feature new file mode 100644 index 000000000000..b9bb331273a7 --- /dev/null +++ b/changelog.d/14960.feature @@ -0,0 +1 @@ +Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 859d54961cad..35f7a50bcea0 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -170,6 +170,7 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, + false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 49add4e9519a..e9af26dd4f8f 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -63,6 +63,23 @@ pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule { }]; pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ + // We don't want to notify on edits. Not only can this be confusing in real + // time (2 notifications, one message) but it's especially confusing + // if a bridge needs to edit a previously backfilled message. 
+ PushRule { + rule_id: Cow::Borrowed("global/override/.com.beeper.suppress_edits"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("content.m.relates_to.rel_type"), + pattern: Some(Cow::Borrowed("m.replace")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::DontNotify]), + default: true, + default_enabled: true, + }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"), priority_class: 5, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index da6f704c0ec6..ec7a8c44530f 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -523,7 +523,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 7e449f243371..3c4f876cabf5 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -419,6 +419,7 @@ pub struct FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc3952_intentional_mentions: bool, + msc3958_suppress_edits_enabled: bool, } #[pymethods] @@ -431,6 +432,7 @@ impl FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc3952_intentional_mentions: bool, + msc3958_suppress_edits_enabled: bool, ) -> Self { Self { push_rules, @@ -439,6 +441,7 @@ impl FilteredPushRules { msc3381_polls_enabled, msc3664_enabled, msc3952_intentional_mentions, + msc3958_suppress_edits_enabled, } } @@ -476,6 +479,11 @@ impl FilteredPushRules { { return false; } + if !self.msc3958_suppress_edits_enabled + && rule.rule_id == "global/override/.com.beeper.suppress_edits" + { + return false; + } true }) diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index c0af2af3df1f..754acab2f978 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -47,6 +47,7 @@ class FilteredPushRules: msc3381_polls_enabled: bool, msc3664_enabled: bool, msc3952_intentional_mentions: bool, + msc3958_suppress_edits_enabled: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index d2d0270dddb1..53c0682dfdb6 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -173,3 +173,8 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.msc3952_intentional_mentions = experimental.get( "msc3952_intentional_mentions", False ) + + # MSC3959: Do not generate notifications for edits. 
+ self.msc3958_supress_edit_notifs = experimental.get( + "msc3958_supress_edit_notifs", False + ) diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 466a1145b78c..9b2bbe060ddc 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -90,6 +90,7 @@ def _load_rules( msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, msc3952_intentional_mentions=experimental_config.msc3952_intentional_mentions, + msc3958_suppress_edits_enabled=experimental_config.msc3958_supress_edit_notifs, ) return filtered_rules diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 3b2d082dcb2b..7567756135b7 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -19,7 +19,7 @@ from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import EventContentFields +from synapse.api.constants import EventContentFields, RelationTypes from synapse.api.room_versions import RoomVersions from synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator from synapse.rest import admin @@ -370,3 +370,43 @@ def test_room_mentions(self) -> None: }, ) ) + + @override_config({"experimental_features": {"msc3958_supress_edit_notifs": True}}) + def test_suppress_edits(self) -> None: + """Under the default push rules, event edits should not generate notifications.""" + bulk_evaluator = BulkPushRuleEvaluator(self.hs) + + # Create & persist an event to use as the parent of the relation. + event, context = self.get_success( + self.event_creation_handler.create_event( + self.requester, + { + "type": "m.room.message", + "room_id": self.room_id, + "content": { + "msgtype": "m.text", + "body": "helo", + }, + "sender": self.alice, + }, + ) + ) + self.get_success( + self.event_creation_handler.handle_new_client_event( + self.requester, events_and_context=[(event, context)] + ) + ) + + # Room mentions from those without power should not notify. + self.assertFalse( + self._create_and_process( + bulk_evaluator, + { + "body": self.alice, + "m.relates_to": { + "rel_type": RelationTypes.REPLACE, + "event_id": event.event_id, + }, + }, + ) + ) From 6e6edea6c15dc1a15f44d9e92d334e3ce0f827dd Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 3 Feb 2023 20:03:23 +0000 Subject: [PATCH 118/344] Properly typecheck tests.api (#14983) --- changelog.d/14983.misc | 1 + mypy.ini | 4 +- synapse/api/filtering.py | 4 +- tests/api/test_auth.py | 64 ++++++++------ tests/api/test_filtering.py | 157 ++++++++++++++++++--------------- tests/api/test_ratelimiting.py | 18 ++-- tests/events/test_utils.py | 2 + 7 files changed, 140 insertions(+), 110 deletions(-) create mode 100644 changelog.d/14983.misc diff --git a/changelog.d/14983.misc b/changelog.d/14983.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14983.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index 57f43395bbb5..a6e37bc3775a 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,7 +32,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/schema/ - |tests/api/test_auth.py |tests/appservice/test_scheduler.py |tests/federation/test_federation_catch_up.py |tests/federation/test_federation_sender.py @@ -73,6 +72,9 @@ disallow_untyped_defs = False [mypy-tests.*] disallow_untyped_defs = False +[mypy-tests.api.*] +disallow_untyped_defs = True + [mypy-tests.app.*] disallow_untyped_defs = True diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 2b5af264b43d..83c42fc25a22 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -252,9 +252,9 @@ def unread_thread_notifications(self) -> bool: return self._room_timeline_filter.unread_thread_notifications async def filter_presence( - self, events: Iterable[UserPresenceState] + self, presence_states: Iterable[UserPresenceState] ) -> List[UserPresenceState]: - return await self._presence_filter.filter(events) + return await self._presence_filter.filter(presence_states) async def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]: return await self._account_data.filter(events) diff --git a/tests/api/test_auth.py b/tests/api/test_auth.py index e0f363555b3c..6e36e73f0d75 100644 --- a/tests/api/test_auth.py +++ b/tests/api/test_auth.py @@ -31,7 +31,7 @@ from synapse.appservice import ApplicationService from synapse.server import HomeServer from synapse.storage.databases.main.registration import TokenLookupResult -from synapse.types import Requester +from synapse.types import Requester, UserID from synapse.util import Clock from tests import unittest @@ -41,10 +41,12 @@ class AuthTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = Mock() - hs.datastores.main = self.store + # type-ignore: datastores is None until hs.setup() is called---but it'll + # have been called by the HomeserverTestCase machinery. 
+ hs.datastores.main = self.store # type: ignore[union-attr] hs.get_auth_handler().store = self.store self.auth = Auth(hs) @@ -61,7 +63,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): self.store.insert_client_ip = simple_async_mock(None) self.store.is_support_user = simple_async_mock(False) - def test_get_user_by_req_user_valid_token(self): + def test_get_user_by_req_user_valid_token(self) -> None: user_info = TokenLookupResult( user_id=self.test_user, token_id=5, device_id="device" ) @@ -74,7 +76,7 @@ def test_get_user_by_req_user_valid_token(self): requester = self.get_success(self.auth.get_user_by_req(request)) self.assertEqual(requester.user.to_string(), self.test_user) - def test_get_user_by_req_user_bad_token(self): + def test_get_user_by_req_user_bad_token(self) -> None: self.store.get_user_by_access_token = simple_async_mock(None) request = Mock(args={}) @@ -86,7 +88,7 @@ def test_get_user_by_req_user_bad_token(self): self.assertEqual(f.code, 401) self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") - def test_get_user_by_req_user_missing_token(self): + def test_get_user_by_req_user_missing_token(self) -> None: user_info = TokenLookupResult(user_id=self.test_user, token_id=5) self.store.get_user_by_access_token = simple_async_mock(user_info) @@ -98,7 +100,7 @@ def test_get_user_by_req_user_missing_token(self): self.assertEqual(f.code, 401) self.assertEqual(f.errcode, "M_MISSING_TOKEN") - def test_get_user_by_req_appservice_valid_token(self): + def test_get_user_by_req_appservice_valid_token(self) -> None: app_service = Mock( token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None ) @@ -112,7 +114,7 @@ def test_get_user_by_req_appservice_valid_token(self): requester = self.get_success(self.auth.get_user_by_req(request)) self.assertEqual(requester.user.to_string(), self.test_user) - def test_get_user_by_req_appservice_valid_token_good_ip(self): + def test_get_user_by_req_appservice_valid_token_good_ip(self) -> None: from netaddr import IPSet app_service = Mock( @@ -131,7 +133,7 @@ def test_get_user_by_req_appservice_valid_token_good_ip(self): requester = self.get_success(self.auth.get_user_by_req(request)) self.assertEqual(requester.user.to_string(), self.test_user) - def test_get_user_by_req_appservice_valid_token_bad_ip(self): + def test_get_user_by_req_appservice_valid_token_bad_ip(self) -> None: from netaddr import IPSet app_service = Mock( @@ -153,7 +155,7 @@ def test_get_user_by_req_appservice_valid_token_bad_ip(self): self.assertEqual(f.code, 401) self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") - def test_get_user_by_req_appservice_bad_token(self): + def test_get_user_by_req_appservice_bad_token(self) -> None: self.store.get_app_service_by_token = Mock(return_value=None) self.store.get_user_by_access_token = simple_async_mock(None) @@ -166,7 +168,7 @@ def test_get_user_by_req_appservice_bad_token(self): self.assertEqual(f.code, 401) self.assertEqual(f.errcode, "M_UNKNOWN_TOKEN") - def test_get_user_by_req_appservice_missing_token(self): + def test_get_user_by_req_appservice_missing_token(self) -> None: app_service = Mock(token="foobar", url="a_url", sender=self.test_user) self.store.get_app_service_by_token = Mock(return_value=app_service) self.store.get_user_by_access_token = simple_async_mock(None) @@ -179,7 +181,7 @@ def test_get_user_by_req_appservice_missing_token(self): self.assertEqual(f.code, 401) self.assertEqual(f.errcode, "M_MISSING_TOKEN") - def test_get_user_by_req_appservice_valid_token_valid_user_id(self): + def 
test_get_user_by_req_appservice_valid_token_valid_user_id(self) -> None: masquerading_user_id = b"@doppelganger:matrix.org" app_service = Mock( token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None @@ -200,7 +202,7 @@ def test_get_user_by_req_appservice_valid_token_valid_user_id(self): requester.user.to_string(), masquerading_user_id.decode("utf8") ) - def test_get_user_by_req_appservice_valid_token_bad_user_id(self): + def test_get_user_by_req_appservice_valid_token_bad_user_id(self) -> None: masquerading_user_id = b"@doppelganger:matrix.org" app_service = Mock( token="foobar", url="a_url", sender=self.test_user, ip_range_whitelist=None @@ -217,7 +219,7 @@ def test_get_user_by_req_appservice_valid_token_bad_user_id(self): self.get_failure(self.auth.get_user_by_req(request), AuthError) @override_config({"experimental_features": {"msc3202_device_masquerading": True}}) - def test_get_user_by_req_appservice_valid_token_valid_device_id(self): + def test_get_user_by_req_appservice_valid_token_valid_device_id(self) -> None: """ Tests that when an application service passes the device_id URL parameter with the ID of a valid device for the user in question, @@ -249,7 +251,7 @@ def test_get_user_by_req_appservice_valid_token_valid_device_id(self): self.assertEqual(requester.device_id, masquerading_device_id.decode("utf8")) @override_config({"experimental_features": {"msc3202_device_masquerading": True}}) - def test_get_user_by_req_appservice_valid_token_invalid_device_id(self): + def test_get_user_by_req_appservice_valid_token_invalid_device_id(self) -> None: """ Tests that when an application service passes the device_id URL parameter with an ID that is not a valid device ID for the user in question, @@ -279,7 +281,7 @@ def test_get_user_by_req_appservice_valid_token_invalid_device_id(self): self.assertEqual(failure.value.code, 400) self.assertEqual(failure.value.errcode, Codes.EXCLUSIVE) - def test_get_user_by_req__puppeted_token__not_tracking_puppeted_mau(self): + def test_get_user_by_req__puppeted_token__not_tracking_puppeted_mau(self) -> None: self.store.get_user_by_access_token = simple_async_mock( TokenLookupResult( user_id="@baldrick:matrix.org", @@ -298,7 +300,7 @@ def test_get_user_by_req__puppeted_token__not_tracking_puppeted_mau(self): self.get_success(self.auth.get_user_by_req(request)) self.store.insert_client_ip.assert_called_once() - def test_get_user_by_req__puppeted_token__tracking_puppeted_mau(self): + def test_get_user_by_req__puppeted_token__tracking_puppeted_mau(self) -> None: self.auth._track_puppeted_user_ips = True self.store.get_user_by_access_token = simple_async_mock( TokenLookupResult( @@ -318,7 +320,7 @@ def test_get_user_by_req__puppeted_token__tracking_puppeted_mau(self): self.get_success(self.auth.get_user_by_req(request)) self.assertEqual(self.store.insert_client_ip.call_count, 2) - def test_get_user_from_macaroon(self): + def test_get_user_from_macaroon(self) -> None: self.store.get_user_by_access_token = simple_async_mock(None) user_id = "@baldrick:matrix.org" @@ -336,7 +338,7 @@ def test_get_user_from_macaroon(self): self.auth.get_user_by_access_token(serialized), InvalidClientTokenError ) - def test_get_guest_user_from_macaroon(self): + def test_get_guest_user_from_macaroon(self) -> None: self.store.get_user_by_id = simple_async_mock({"is_guest": True}) self.store.get_user_by_access_token = simple_async_mock(None) @@ -357,7 +359,7 @@ def test_get_guest_user_from_macaroon(self): self.assertTrue(user_info.is_guest) 
self.store.get_user_by_id.assert_called_with(user_id) - def test_blocking_mau(self): + def test_blocking_mau(self) -> None: self.auth_blocking._limit_usage_by_mau = False self.auth_blocking._max_mau_value = 50 lots_of_users = 100 @@ -381,7 +383,7 @@ def test_blocking_mau(self): self.store.get_monthly_active_count = simple_async_mock(small_number_of_users) self.get_success(self.auth_blocking.check_auth_blocking()) - def test_blocking_mau__depending_on_user_type(self): + def test_blocking_mau__depending_on_user_type(self) -> None: self.auth_blocking._max_mau_value = 50 self.auth_blocking._limit_usage_by_mau = True @@ -400,7 +402,9 @@ def test_blocking_mau__depending_on_user_type(self): # Real users not allowed self.get_failure(self.auth_blocking.check_auth_blocking(), ResourceLimitError) - def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self): + def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips( + self, + ) -> None: self.auth_blocking._max_mau_value = 50 self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._track_appservice_user_ips = False @@ -418,7 +422,7 @@ def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self): sender="@appservice:sender", ) requester = Requester( - user="@appservice:server", + user=UserID.from_string("@appservice:server"), access_token_id=None, device_id="FOOBAR", is_guest=False, @@ -428,7 +432,9 @@ def test_blocking_mau__appservice_requester_allowed_when_not_tracking_ips(self): ) self.get_success(self.auth_blocking.check_auth_blocking(requester=requester)) - def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self): + def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips( + self, + ) -> None: self.auth_blocking._max_mau_value = 50 self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._track_appservice_user_ips = True @@ -446,7 +452,7 @@ def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self): sender="@appservice:sender", ) requester = Requester( - user="@appservice:server", + user=UserID.from_string("@appservice:server"), access_token_id=None, device_id="FOOBAR", is_guest=False, @@ -459,7 +465,7 @@ def test_blocking_mau__appservice_requester_disallowed_when_tracking_ips(self): ResourceLimitError, ) - def test_reserved_threepid(self): + def test_reserved_threepid(self) -> None: self.auth_blocking._limit_usage_by_mau = True self.auth_blocking._max_mau_value = 1 self.store.get_monthly_active_count = simple_async_mock(2) @@ -476,7 +482,7 @@ def test_reserved_threepid(self): self.get_success(self.auth_blocking.check_auth_blocking(threepid=threepid)) - def test_hs_disabled(self): + def test_hs_disabled(self) -> None: self.auth_blocking._hs_disabled = True self.auth_blocking._hs_disabled_message = "Reason for being disabled" e = self.get_failure( @@ -486,7 +492,7 @@ def test_hs_disabled(self): self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) - def test_hs_disabled_no_server_notices_user(self): + def test_hs_disabled_no_server_notices_user(self) -> None: """Check that 'hs_disabled_message' works correctly when there is no server_notices user. 
""" @@ -503,7 +509,7 @@ def test_hs_disabled_no_server_notices_user(self): self.assertEqual(e.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) self.assertEqual(e.value.code, 403) - def test_server_notices_mxid_special_cased(self): + def test_server_notices_mxid_special_cased(self) -> None: self.auth_blocking._hs_disabled = True user = "@user:server" self.auth_blocking._server_notices_mxid = user diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index d5524d296e38..0f45615160ed 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -14,40 +14,36 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from typing import List from unittest.mock import patch import jsonschema from frozendict import frozendict +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EduTypes, EventContentFields from synapse.api.errors import SynapseError from synapse.api.filtering import Filter -from synapse.events import make_event_from_dict +from synapse.api.presence import UserPresenceState +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest +from tests.events.test_utils import MockEvent user_localpart = "test_user" -def MockEvent(**kwargs): - if "event_id" not in kwargs: - kwargs["event_id"] = "fake_event_id" - if "type" not in kwargs: - kwargs["type"] = "fake_type" - if "content" not in kwargs: - kwargs["content"] = {} - return make_event_from_dict(kwargs) - - class FilteringTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.filtering = hs.get_filtering() self.datastore = hs.get_datastores().main - def test_errors_on_invalid_filters(self): + def test_errors_on_invalid_filters(self) -> None: # See USER_FILTER_SCHEMA for the filter schema. - invalid_filters = [ + invalid_filters: List[JsonDict] = [ # `account_data` must be a dictionary {"account_data": "Hello World"}, # `event_fields` entries must not contain backslashes @@ -63,10 +59,10 @@ def test_errors_on_invalid_filters(self): with self.assertRaises(SynapseError): self.filtering.check_valid_filter(filter) - def test_ignores_unknown_filter_fields(self): + def test_ignores_unknown_filter_fields(self) -> None: # For forward compatibility, we must ignore unknown filter fields. # See USER_FILTER_SCHEMA for the filter schema. - filters = [ + filters: List[JsonDict] = [ {"org.matrix.msc9999.future_option": True}, {"presence": {"org.matrix.msc9999.future_option": True}}, {"room": {"org.matrix.msc9999.future_option": True}}, @@ -76,8 +72,8 @@ def test_ignores_unknown_filter_fields(self): self.filtering.check_valid_filter(filter) # Must not raise. 
- def test_valid_filters(self): - valid_filters = [ + def test_valid_filters(self) -> None: + valid_filters: List[JsonDict] = [ { "room": { "timeline": {"limit": 20}, @@ -132,22 +128,22 @@ def test_valid_filters(self): except jsonschema.ValidationError as e: self.fail(e) - def test_limits_are_applied(self): + def test_limits_are_applied(self) -> None: # TODO pass - def test_definition_types_works_with_literals(self): + def test_definition_types_works_with_literals(self) -> None: definition = {"types": ["m.room.message", "org.matrix.foo.bar"]} event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar") self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_types_works_with_wildcards(self): + def test_definition_types_works_with_wildcards(self) -> None: definition = {"types": ["m.*", "org.matrix.foo.bar"]} event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar") self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_types_works_with_unknowns(self): + def test_definition_types_works_with_unknowns(self) -> None: definition = {"types": ["m.room.message", "org.matrix.foo.bar"]} event = MockEvent( sender="@foo:bar", @@ -156,24 +152,24 @@ def test_definition_types_works_with_unknowns(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_not_types_works_with_literals(self): + def test_definition_not_types_works_with_literals(self) -> None: definition = {"not_types": ["m.room.message", "org.matrix.foo.bar"]} event = MockEvent(sender="@foo:bar", type="m.room.message", room_id="!foo:bar") self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_not_types_works_with_wildcards(self): + def test_definition_not_types_works_with_wildcards(self) -> None: definition = {"not_types": ["m.room.message", "org.matrix.*"]} event = MockEvent( sender="@foo:bar", type="org.matrix.custom.event", room_id="!foo:bar" ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_not_types_works_with_unknowns(self): + def test_definition_not_types_works_with_unknowns(self) -> None: definition = {"not_types": ["m.*", "org.*"]} event = MockEvent(sender="@foo:bar", type="com.nom.nom.nom", room_id="!foo:bar") self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_not_types_takes_priority_over_types(self): + def test_definition_not_types_takes_priority_over_types(self) -> None: definition = { "not_types": ["m.*", "org.*"], "types": ["m.room.message", "m.room.topic"], @@ -181,35 +177,35 @@ def test_definition_not_types_takes_priority_over_types(self): event = MockEvent(sender="@foo:bar", type="m.room.topic", room_id="!foo:bar") self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_senders_works_with_literals(self): + def test_definition_senders_works_with_literals(self) -> None: definition = {"senders": ["@flibble:wibble"]} event = MockEvent( sender="@flibble:wibble", type="com.nom.nom.nom", room_id="!foo:bar" ) self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_senders_works_with_unknowns(self): + def test_definition_senders_works_with_unknowns(self) -> None: definition = {"senders": ["@flibble:wibble"]} event = MockEvent( sender="@challenger:appears", type="com.nom.nom.nom", room_id="!foo:bar" ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_not_senders_works_with_literals(self): + def 
test_definition_not_senders_works_with_literals(self) -> None: definition = {"not_senders": ["@flibble:wibble"]} event = MockEvent( sender="@flibble:wibble", type="com.nom.nom.nom", room_id="!foo:bar" ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_not_senders_works_with_unknowns(self): + def test_definition_not_senders_works_with_unknowns(self) -> None: definition = {"not_senders": ["@flibble:wibble"]} event = MockEvent( sender="@challenger:appears", type="com.nom.nom.nom", room_id="!foo:bar" ) self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_not_senders_takes_priority_over_senders(self): + def test_definition_not_senders_takes_priority_over_senders(self) -> None: definition = { "not_senders": ["@misspiggy:muppets"], "senders": ["@kermit:muppets", "@misspiggy:muppets"], @@ -219,14 +215,14 @@ def test_definition_not_senders_takes_priority_over_senders(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_rooms_works_with_literals(self): + def test_definition_rooms_works_with_literals(self) -> None: definition = {"rooms": ["!secretbase:unknown"]} event = MockEvent( sender="@foo:bar", type="m.room.message", room_id="!secretbase:unknown" ) self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_rooms_works_with_unknowns(self): + def test_definition_rooms_works_with_unknowns(self) -> None: definition = {"rooms": ["!secretbase:unknown"]} event = MockEvent( sender="@foo:bar", @@ -235,7 +231,7 @@ def test_definition_rooms_works_with_unknowns(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_not_rooms_works_with_literals(self): + def test_definition_not_rooms_works_with_literals(self) -> None: definition = {"not_rooms": ["!anothersecretbase:unknown"]} event = MockEvent( sender="@foo:bar", @@ -244,7 +240,7 @@ def test_definition_not_rooms_works_with_literals(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_not_rooms_works_with_unknowns(self): + def test_definition_not_rooms_works_with_unknowns(self) -> None: definition = {"not_rooms": ["!secretbase:unknown"]} event = MockEvent( sender="@foo:bar", @@ -253,7 +249,7 @@ def test_definition_not_rooms_works_with_unknowns(self): ) self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_not_rooms_takes_priority_over_rooms(self): + def test_definition_not_rooms_takes_priority_over_rooms(self) -> None: definition = { "not_rooms": ["!secretbase:unknown"], "rooms": ["!secretbase:unknown"], @@ -263,7 +259,7 @@ def test_definition_not_rooms_takes_priority_over_rooms(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_combined_event(self): + def test_definition_combined_event(self) -> None: definition = { "not_senders": ["@misspiggy:muppets"], "senders": ["@kermit:muppets"], @@ -279,7 +275,7 @@ def test_definition_combined_event(self): ) self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_definition_combined_event_bad_sender(self): + def test_definition_combined_event_bad_sender(self) -> None: definition = { "not_senders": ["@misspiggy:muppets"], "senders": ["@kermit:muppets"], @@ -295,7 +291,7 @@ def test_definition_combined_event_bad_sender(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_combined_event_bad_room(self): + def test_definition_combined_event_bad_room(self) -> None: definition = { "not_senders": 
["@misspiggy:muppets"], "senders": ["@kermit:muppets"], @@ -311,7 +307,7 @@ def test_definition_combined_event_bad_room(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_definition_combined_event_bad_type(self): + def test_definition_combined_event_bad_type(self) -> None: definition = { "not_senders": ["@misspiggy:muppets"], "senders": ["@kermit:muppets"], @@ -327,7 +323,7 @@ def test_definition_combined_event_bad_type(self): ) self.assertFalse(Filter(self.hs, definition)._check(event)) - def test_filter_labels(self): + def test_filter_labels(self) -> None: definition = {"org.matrix.labels": ["#fun"]} event = MockEvent( sender="@foo:bar", @@ -356,7 +352,7 @@ def test_filter_labels(self): ) self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_filter_not_labels(self): + def test_filter_not_labels(self) -> None: definition = {"org.matrix.not_labels": ["#fun"]} event = MockEvent( sender="@foo:bar", @@ -377,7 +373,7 @@ def test_filter_not_labels(self): self.assertTrue(Filter(self.hs, definition)._check(event)) @unittest.override_config({"experimental_features": {"msc3874_enabled": True}}) - def test_filter_rel_type(self): + def test_filter_rel_type(self) -> None: definition = {"org.matrix.msc3874.rel_types": ["m.thread"]} event = MockEvent( sender="@foo:bar", @@ -407,7 +403,7 @@ def test_filter_rel_type(self): self.assertTrue(Filter(self.hs, definition)._check(event)) @unittest.override_config({"experimental_features": {"msc3874_enabled": True}}) - def test_filter_not_rel_type(self): + def test_filter_not_rel_type(self) -> None: definition = {"org.matrix.msc3874.not_rel_types": ["m.thread"]} event = MockEvent( sender="@foo:bar", @@ -436,15 +432,25 @@ def test_filter_not_rel_type(self): self.assertTrue(Filter(self.hs, definition)._check(event)) - def test_filter_presence_match(self): - user_filter_json = {"presence": {"types": ["m.*"]}} + def test_filter_presence_match(self) -> None: + """Check that filter_presence return events which matches the filter.""" + user_filter_json = {"presence": {"senders": ["@foo:bar"]}} filter_id = self.get_success( self.datastore.add_user_filter( user_localpart=user_localpart, user_filter=user_filter_json ) ) - event = MockEvent(sender="@foo:bar", type="m.profile") - events = [event] + presence_states = [ + UserPresenceState( + user_id="@foo:bar", + state="unavailable", + last_active_ts=0, + last_federation_update_ts=0, + last_user_sync_ts=0, + status_msg=None, + currently_active=False, + ), + ] user_filter = self.get_success( self.filtering.get_user_filter( @@ -452,23 +458,29 @@ def test_filter_presence_match(self): ) ) - results = self.get_success(user_filter.filter_presence(events=events)) - self.assertEqual(events, results) + results = self.get_success(user_filter.filter_presence(presence_states)) + self.assertEqual(presence_states, results) - def test_filter_presence_no_match(self): - user_filter_json = {"presence": {"types": ["m.*"]}} + def test_filter_presence_no_match(self) -> None: + """Check that filter_presence does not return events rejected by the filter.""" + user_filter_json = {"presence": {"not_senders": ["@foo:bar"]}} filter_id = self.get_success( self.datastore.add_user_filter( user_localpart=user_localpart + "2", user_filter=user_filter_json ) ) - event = MockEvent( - event_id="$asdasd:localhost", - sender="@foo:bar", - type="custom.avatar.3d.crazy", - ) - events = [event] + presence_states = [ + UserPresenceState( + user_id="@foo:bar", + state="unavailable", + last_active_ts=0, + 
last_federation_update_ts=0, + last_user_sync_ts=0, + status_msg=None, + currently_active=False, + ), + ] user_filter = self.get_success( self.filtering.get_user_filter( @@ -476,10 +488,10 @@ def test_filter_presence_no_match(self): ) ) - results = self.get_success(user_filter.filter_presence(events=events)) + results = self.get_success(user_filter.filter_presence(presence_states)) self.assertEqual([], results) - def test_filter_room_state_match(self): + def test_filter_room_state_match(self) -> None: user_filter_json = {"room": {"state": {"types": ["m.*"]}}} filter_id = self.get_success( self.datastore.add_user_filter( @@ -498,7 +510,7 @@ def test_filter_room_state_match(self): results = self.get_success(user_filter.filter_room_state(events=events)) self.assertEqual(events, results) - def test_filter_room_state_no_match(self): + def test_filter_room_state_no_match(self) -> None: user_filter_json = {"room": {"state": {"types": ["m.*"]}}} filter_id = self.get_success( self.datastore.add_user_filter( @@ -519,7 +531,7 @@ def test_filter_room_state_no_match(self): results = self.get_success(user_filter.filter_room_state(events)) self.assertEqual([], results) - def test_filter_rooms(self): + def test_filter_rooms(self) -> None: definition = { "rooms": ["!allowed:example.com", "!excluded:example.com"], "not_rooms": ["!excluded:example.com"], @@ -535,7 +547,7 @@ def test_filter_rooms(self): self.assertEqual(filtered_room_ids, ["!allowed:example.com"]) - def test_filter_relations(self): + def test_filter_relations(self) -> None: events = [ # An event without a relation. MockEvent( @@ -551,9 +563,8 @@ def test_filter_relations(self): type="org.matrix.custom.event", room_id="!foo:bar", ), - # Non-EventBase objects get passed through. - {}, ] + jsondicts: List[JsonDict] = [{}] # For the following tests we patch the datastore method (intead of injecting # events). This is a bit cheeky, but tests the logic of _check_event_relations. @@ -561,7 +572,7 @@ def test_filter_relations(self): # Filter for a particular sender. definition = {"related_by_senders": ["@foo:bar"]} - async def events_have_relations(*args, **kwargs): + async def events_have_relations(*args: object, **kwargs: object) -> List[str]: return ["$with_relation"] with patch.object( @@ -572,9 +583,17 @@ async def events_have_relations(*args, **kwargs): Filter(self.hs, definition)._check_event_relations(events) ) ) + # Non-EventBase objects get passed through. 
+ filtered_jsondicts = list( + self.get_success( + Filter(self.hs, definition)._check_event_relations(jsondicts) + ) + ) + self.assertEqual(filtered_events, events[1:]) + self.assertEqual(filtered_jsondicts, [{}]) - def test_add_filter(self): + def test_add_filter(self) -> None: user_filter_json = {"room": {"state": {"types": ["m.*"]}}} filter_id = self.get_success( @@ -595,7 +614,7 @@ def test_add_filter(self): ), ) - def test_get_filter(self): + def test_get_filter(self) -> None: user_filter_json = {"room": {"state": {"types": ["m.*"]}}} filter_id = self.get_success( diff --git a/tests/api/test_ratelimiting.py b/tests/api/test_ratelimiting.py index b5fd08d43711..fa6c1c02ce95 100644 --- a/tests/api/test_ratelimiting.py +++ b/tests/api/test_ratelimiting.py @@ -6,7 +6,7 @@ class TestRatelimiter(unittest.HomeserverTestCase): - def test_allowed_via_can_do_action(self): + def test_allowed_via_can_do_action(self) -> None: limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, @@ -31,7 +31,7 @@ def test_allowed_via_can_do_action(self): self.assertTrue(allowed) self.assertEqual(20.0, time_allowed) - def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): + def test_allowed_appservice_ratelimited_via_can_requester_do_action(self) -> None: appservice = ApplicationService( token="fake_token", id="foo", @@ -64,7 +64,7 @@ def test_allowed_appservice_ratelimited_via_can_requester_do_action(self): self.assertTrue(allowed) self.assertEqual(20.0, time_allowed) - def test_allowed_appservice_via_can_requester_do_action(self): + def test_allowed_appservice_via_can_requester_do_action(self) -> None: appservice = ApplicationService( token="fake_token", id="foo", @@ -97,7 +97,7 @@ def test_allowed_appservice_via_can_requester_do_action(self): self.assertTrue(allowed) self.assertEqual(-1, time_allowed) - def test_allowed_via_ratelimit(self): + def test_allowed_via_ratelimit(self) -> None: limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, @@ -120,7 +120,7 @@ def test_allowed_via_ratelimit(self): limiter.ratelimit(None, key="test_id", _time_now_s=10) ) - def test_allowed_via_can_do_action_and_overriding_parameters(self): + def test_allowed_via_can_do_action_and_overriding_parameters(self) -> None: """Test that we can override options of can_do_action that would otherwise fail an action """ @@ -169,7 +169,7 @@ def test_allowed_via_can_do_action_and_overriding_parameters(self): self.assertTrue(allowed) self.assertEqual(1.0, time_allowed) - def test_allowed_via_ratelimit_and_overriding_parameters(self): + def test_allowed_via_ratelimit_and_overriding_parameters(self) -> None: """Test that we can override options of the ratelimit method that would otherwise fail an action """ @@ -204,7 +204,7 @@ def test_allowed_via_ratelimit_and_overriding_parameters(self): limiter.ratelimit(None, key=("test_id",), _time_now_s=1, burst_count=10) ) - def test_pruning(self): + def test_pruning(self) -> None: limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, @@ -223,7 +223,7 @@ def test_pruning(self): self.assertNotIn("test_id_1", limiter.actions) - def test_db_user_override(self): + def test_db_user_override(self) -> None: """Test that users that have ratelimiting disabled in the DB aren't ratelimited. 
""" @@ -250,7 +250,7 @@ def test_db_user_override(self): for _ in range(20): self.get_success_or_raise(limiter.ratelimit(requester, _time_now_s=0)) - def test_multiple_actions(self): + def test_multiple_actions(self) -> None: limiter = Ratelimiter( store=self.hs.get_datastores().main, clock=self.clock, diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index ff7b349d756c..4174a237ec84 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -35,6 +35,8 @@ def MockEvent(**kwargs: Any) -> EventBase: kwargs["event_id"] = "fake_event_id" if "type" not in kwargs: kwargs["type"] = "fake_type" + if "content" not in kwargs: + kwargs["content"] = {} return make_event_from_dict(kwargs) From b3bf58a8a5f56674cb0ea0ab6c29aba5775dec52 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 6 Feb 2023 11:29:51 +0000 Subject: [PATCH 119/344] Only notify the target of a membership event (#14971) * Only notify the target of a membership event Naughty, but should be a big speedup in large rooms --- changelog.d/14971.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 38 +++++++++++++++++++----- 2 files changed, 31 insertions(+), 8 deletions(-) create mode 100644 changelog.d/14971.misc diff --git a/changelog.d/14971.misc b/changelog.d/14971.misc new file mode 100644 index 000000000000..130045a1237f --- /dev/null +++ b/changelog.d/14971.misc @@ -0,0 +1 @@ +Improve performance of joining and leaving large rooms with many local users. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 20369f3dfeee..f73dceb128bc 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -142,15 +142,34 @@ async def _get_rules_for_event( Returns: Mapping of user ID to their push rules. """ - # We get the users who may need to be notified by first fetching the - # local users currently in the room, finding those that have push rules, - # and *then* checking which users are actually allowed to see the event. - # - # The alternative is to first fetch all users that were joined at the - # event, but that requires fetching the full state at the event, which - # may be expensive for large rooms with few local users. + # If this is a membership event, only calculate push rules for the target. + # While it's possible for users to configure push rules to respond to such an + # event, in practise nobody does this. At the cost of violating the spec a + # little, we can skip fetching a huge number of push rules in large rooms. + # This helps make joins and leaves faster. + if event.type == EventTypes.Member: + local_users = [] + # We never notify a user about their own actions. This is enforced in + # `_action_for_event_by_user` in the loop over `rules_by_user`, but we + # do the same check here to avoid unnecessary DB queries. + if event.sender != event.state_key and self.hs.is_mine_id(event.state_key): + # Check the target is in the room, to avoid notifying them of + # e.g. a pre-emptive ban. + target_already_in_room = await self.store.check_local_user_in_room( + event.state_key, event.room_id + ) + if target_already_in_room: + local_users = [event.state_key] + else: + # We get the users who may need to be notified by first fetching the + # local users currently in the room, finding those that have push rules, + # and *then* checking which users are actually allowed to see the event. 
+ # + # The alternative is to first fetch all users that were joined at the + # event, but that requires fetching the full state at the event, which + # may be expensive for large rooms with few local users. - local_users = await self.store.get_local_users_in_room(event.room_id) + local_users = await self.store.get_local_users_in_room(event.room_id) # Filter out appservice users. local_users = [ @@ -167,6 +186,9 @@ async def _get_rules_for_event( local_users = list(local_users) local_users.append(invited) + if not local_users: + return {} + rules_by_user = await self.store.bulk_get_push_rules(local_users) logger.debug("Users in room: %s", local_users) From f3f495c4e3d00a1d318785a9f4cce1961883576f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 11:33:23 +0000 Subject: [PATCH 120/344] Bump hiredis from 2.1.1 to 2.2.1 (#14993) * Bump hiredis from 2.1.1 to 2.2.1 Bumps [hiredis](https://github.com/redis/hiredis-py) from 2.1.1 to 2.2.1. - [Release notes](https://github.com/redis/hiredis-py/releases) - [Changelog](https://github.com/redis/hiredis-py/blob/master/CHANGELOG.md) - [Commits](https://github.com/redis/hiredis-py/compare/v2.1.1...v2.2.1) --- updated-dependencies: - dependency-name: hiredis dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14993.misc | 1 + poetry.lock | 180 ++++++++++++++++++++--------------------- 2 files changed, 91 insertions(+), 90 deletions(-) create mode 100644 changelog.d/14993.misc diff --git a/changelog.d/14993.misc b/changelog.d/14993.misc new file mode 100644 index 000000000000..77f3528e2b9d --- /dev/null +++ b/changelog.d/14993.misc @@ -0,0 +1 @@ +Bump hiredis from 2.1.1 to 2.2.1. 
diff --git a/poetry.lock b/poetry.lock index 2d58788709a5..9170f81ac591 100644 --- a/poetry.lock +++ b/poetry.lock @@ -501,101 +501,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\"" [[package]] name = "hiredis" -version = "2.1.1" +version = "2.2.1" description = "Python wrapper for hiredis" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "hiredis-2.1.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:f15e48545dadf3760220821d2f3c850e0c67bbc66aad2776c9d716e6216b5103"}, - {file = "hiredis-2.1.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b3a437e3af246dd06d116f1615cdf4e620e639dfcc923fe3045e00f6a967fc27"}, - {file = "hiredis-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b61732d75e2222a3b0060b97395df78693d5c3487fe4a5d0b75f6ac1affc68b9"}, - {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:170c2080966721b42c5a8726e91c5fc271300a4ac9ddf8a5b79856cfd47553e1"}, - {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d2d6e4caaffaf42faf14cfdf20b1d6fff6b557137b44e9569ea6f1877e6f375d"}, - {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d64b2d90302f0dd9e9ba43e89f8640f35b6d5968668da82ba2d2652b2cc3c3d2"}, - {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:61fd1c55efb48ba734628f096e7a50baf0df3f18e91183face5c07fba3b4beb7"}, - {file = "hiredis-2.1.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfc5e923828714f314737e7f856b3dccf8805e5679fe23f07241b397cd785f6c"}, - {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:ef2aa0485735c8608a92964e52ab9025ceb6003776184a1eb5d1701742cc910b"}, - {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2d39193900a03b900a25d474b9f787434f05a282b402f063d4ca02c62d61bdb9"}, - {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:4b51f5eb47e61c6b82cb044a1815903a77a4f840fa050fd2ff40d617c102d16c"}, - {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:d9145d011b74bef972b485a09f391babaa101626dbb54afc2313d5682a746593"}, - {file = "hiredis-2.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6f45509b43d720d64837c1211fcdea42acd48e71539b7152d74c16413ceea080"}, - {file = "hiredis-2.1.1-cp310-cp310-win32.whl", hash = "sha256:3a284bbf6503cd6ac1183b3542fe853a8be47fb52a631224f6dda46ba229d572"}, - {file = "hiredis-2.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:f60fad285db733b2badba43f7036a1241cb3e19c17260348f3ff702e6eaa4980"}, - {file = "hiredis-2.1.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:69c20816ac2af11701caf10e5b027fd33c6e8dfe7806ab71bc5191aa2a6d50f9"}, - {file = "hiredis-2.1.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:cd43dbaa73322a0c125122114cbc2c37141353b971751d05798f3b9780091e90"}, - {file = "hiredis-2.1.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c9632cd480fbc09c14622038a9a5f2f21ef6ce35892e9fa4df8d3308d3f2cedf"}, - {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:252d4a254f1566012b94e35cba577a001d3a732fa91e824d2076233222232cf9"}, - {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b901e68f3a6da279388e5dbe8d3bc562dd6dd3ff8a4b90e4f62e94de36461777"}, - {file = 
"hiredis-2.1.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f45f296998043345ecfc4f69a51fa4f3e80ca3659864df80b459095580968a6"}, - {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79f2acf237428dd61faa5b49247999ff68f45b3552c57303fcfabd2002eab249"}, - {file = "hiredis-2.1.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82bc6f5b92c9fcd5b5d6506000dd433006b126b193932c52a9bcc10dcc10e4fc"}, - {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:19843e4505069085301c3126c91b4e48970070fb242d7c617fb6777e83b55541"}, - {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c7336fddae533cbe786360d7a0316c71fe96313872c06cde20a969765202ab04"}, - {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:90b4355779970e121c219def3e35533ec2b24773a26fc4aa0f8271dd262fa2f2"}, - {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4beaac5047317a73b27cf15b4f4e0d2abaafa8378e1a6ed4cf9ff420d8f88aba"}, - {file = "hiredis-2.1.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:7e25dc06e02689a45a49fa5e2f48bdfdbc11c5b52bef792a8cb37e0b82a7b0ae"}, - {file = "hiredis-2.1.1-cp311-cp311-win32.whl", hash = "sha256:f8b3233c1de155743ef34b0cae494e33befed5e0adba77762f5d8a8e417c5015"}, - {file = "hiredis-2.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:4ced076af04e28761d486501c58259247c1882fd19c7f94c18a257d143248eee"}, - {file = "hiredis-2.1.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:f4300e063045e11ee79b79a7c9426813ab8d97e340b15843374093225dde407d"}, - {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b04b6c04fe13e1e30ba6f9340d3d0fb776a7e52611d11809fb59341871e050e5"}, - {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:436dcbbe3104737e8b4e2d63a019a764d107d72d6b6ee3cd107097c1c263fd1e"}, - {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:11801d9e96f39286ab558c6db940c39fc00150450ae1007d18b35437d2f79ad7"}, - {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7d8d0ca7b4f6136f8a29845d31cfbc3f562cbe71f26da6fca55aa4977e45a18"}, - {file = "hiredis-2.1.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c040af9eb9b12602b4b714b90a1c2ac1109e939498d47b0748ec33e7a948747"}, - {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:f448146b86a8693dda5f02bb4cb2ef65c894db2cf743e7bf351978354ce685e3"}, - {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:649c5a1f0952af50f008f0bbec5f0b1e519150220c0a71ef80541a0c128d0c13"}, - {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b8e7415b0952b0dd6df3aa2d37b5191c85e54d6a0ac1449ddb1e9039bbb39fa5"}, - {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:38c1a56a30b953e3543662f950f498cfb17afed214b27f4fc497728fb623e0c9"}, - {file = "hiredis-2.1.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6050b519fb3b62d68a28a1941ae9dc5122e8820fef2b8e20a65cb3c1577332a0"}, - {file = "hiredis-2.1.1-cp37-cp37m-win32.whl", hash = "sha256:96add2a205efffe5e19a256a50be0ed78fcb5e9503242c65f57928e95cf4c901"}, - {file = "hiredis-2.1.1-cp37-cp37m-win_amd64.whl", hash = "sha256:8ceb101095f8cce9ac672ed7244b002d83ea97af7f27bb73f2fbe7fe8e8f03c7"}, - {file = 
"hiredis-2.1.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:9f068136e5119f2ba939ecd45c47b4e3cf6dd7ca9a65b6078c838029c5c1f564"}, - {file = "hiredis-2.1.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:8a42e246a03086ae1430f789e37d7192113db347417932745c4700d8999f853a"}, - {file = "hiredis-2.1.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5359811bfdb10fca234cba4629e555a1cde6c8136025395421f486ce43129ae3"}, - {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d304746e2163d3d2cbc4c08925539e00d2bb3edc9e79fce531b5468d4e264d15"}, - {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4fe297a52a8fc1204eef646bebf616263509d089d472e25742913924b1449099"}, - {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:637e563d5cbf79d8b04224f99cfce8001146647e7ce198f0b032e32e62079e3c"}, - {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:39b61340ff2dcd99d5ded0ca5fc33c878d89a1426e2f7b6dbc7c7381e330bc8a"}, - {file = "hiredis-2.1.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66eaf6d5ea5207177ba8ffb9ee479eea743292267caf1d6b89b51cf9d5885d23"}, - {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4d2d0e458c32cdafd9a0f0b0aaeb61b169583d074287721eee740b730b7654bd"}, - {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8a92781e466f2f1f9d38720d8920cb094bc0d59f88219591bc12b1c12c9d471c"}, - {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:5560b09304ebaac5323a7402f5090f2a8559843200014f5adf1ff7517dd3805b"}, - {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4732a0bf877bbd69d4d1b38a3db2160252acb31894a48f324fd54f742f6b2123"}, - {file = "hiredis-2.1.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b5bd33ac8a572e2aa94b489dec35b0c00ca554b27e56ad19953e0bf2cbcf3ad8"}, - {file = "hiredis-2.1.1-cp38-cp38-win32.whl", hash = "sha256:07e86649773e486a21e170d1396217e15833776d9e8f4a7121c28a1d37e032c9"}, - {file = "hiredis-2.1.1-cp38-cp38-win_amd64.whl", hash = "sha256:b964d81db8f11a99552621acd24c97381a0fd401a57187ce9f8cb9a53f4b6f4e"}, - {file = "hiredis-2.1.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:27e89e7befc785a273cccb105840db54b7f93005adf4e68c516d57b19ea2aac2"}, - {file = "hiredis-2.1.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:ea6f0f98e1721741b5bc3167a495a9f16459fe67648054be05365a67e67c29ba"}, - {file = "hiredis-2.1.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:40c34aeecccb9474999839299c9d2d5ff46a62ed47c58645b7965f48944abd74"}, - {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65927e75da4265ec88d06cbdab20113a9e69bbac3aea1ec053d4d940f1c88fc8"}, - {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:72cab67bcceb2e998da2f28aad9ec7b1a5ece5888f7ac3d3723cccba62338703"}, - {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d67429ff99231137491d8c3daa097c767a9c273bb03ac412ed8f6acb89e2e52f"}, - {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c596bce5e9dd379c68c17208716da2767bb6f6f2a71d748f9e4c247ced31e6"}, - {file = "hiredis-2.1.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:7e0aab2d6e60aa9f9e14c83396b4a58fb4aded712806486c79189bcae4a175ac"}, - {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:17deb7d218a5ae9f05d2b19d51936231546973303747924fc17a2869aef0029a"}, - {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:d3d60e2af4ce93d6e45a50a9b5795156a8725495e411c7987a2f81ab14e99665"}, - {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:fbc960cd91e55e2281e1a330e7d1c4970b6a05567dd973c96e412b4d012e17c6"}, - {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0ae718e9db4b622072ff73d38bc9cd7711edfedc8a1e08efe25a6c8170446da4"}, - {file = "hiredis-2.1.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e51e3fa176fecd19660f898c4238232e8ca0f5709e6451a664c996f9aec1b8e1"}, - {file = "hiredis-2.1.1-cp39-cp39-win32.whl", hash = "sha256:0258bb84b4a1e015f14f891d91957042fa88f6f4e86cc0808d735ebbc1e3fc88"}, - {file = "hiredis-2.1.1-cp39-cp39-win_amd64.whl", hash = "sha256:c5a47c964c58c044a323336a798d8729722e09865d7e087eb3512df6146b39a8"}, - {file = "hiredis-2.1.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8de0334c212e069d49952e476e16c6b42ba9677cc1e2d2f4588bd9a39489a3ab"}, - {file = "hiredis-2.1.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:653e33f69202c00eca35416ee23091447ad1e9f9a556cc2b715b2befcfc31b3c"}, - {file = "hiredis-2.1.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f14cccf931c859ba3169d766e892a3673a79649ec2ceca7ba95ea376b23fd222"}, - {file = "hiredis-2.1.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86c56359fd7aca6a9ca41af91636aef15d5ad6d19e631ebd662f233c79f7e100"}, - {file = "hiredis-2.1.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:c2b197e3613c3aef3933b2c6eb095bd4be9c84022aea52057697b709b400c4bc"}, - {file = "hiredis-2.1.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ec060d6db9576f6723b5290448aea67160608556b5506eb947997d9d1ca6f7b7"}, - {file = "hiredis-2.1.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8781f5b91d75abef529a33cf3509ba5fe540d2814de0c4602f0f5ba6f1669739"}, - {file = "hiredis-2.1.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9bd6b934794bea92a15b10ac35889df63b28d2abf9d020a7c87c05dd9c6e1edd"}, - {file = "hiredis-2.1.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf6d85c1ffb4ec4a859b2f31cd8845e633f91ed971a3cce6f59a722dcc361b8c"}, - {file = "hiredis-2.1.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bbf80c686e3f63d40b0ab42d3605d3b6d415c368a5d8a9764a314ebda6138650"}, - {file = "hiredis-2.1.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c1d85dfdf37a8df0e0174fc0c762b485b80a2fc7ce9592ae109aaf4a5d45ba9a"}, - {file = "hiredis-2.1.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:816b9ea96e7cc2496a1ac9c4a76db670827c1e31045cc377c66e64a20bb4b3ff"}, - {file = "hiredis-2.1.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db59afa0edf194bea782e4686bfc496fc1cea2e24f310d769641e343d14cc929"}, - {file = "hiredis-2.1.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c7a7e4ccec7164cdf2a9bbedc0e7430492eb56d9355a41377f40058c481bccc"}, - {file = "hiredis-2.1.1-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:646f150fa73f9cbc69419e34a1aae318c9f39bd9640760aa46624b2815da0c2d"}, - {file = "hiredis-2.1.1.tar.gz", hash = "sha256:21751e4b7737aaf7261a068758b22f7670155099592b28d8dde340bf6874313d"}, + {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:998ab35070dc81806a23be5de837466a51b25e739fb1a0d5313474d5bb29c829"}, + {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:70db8f514ebcb6f884497c4eee21d0350bbc4102e63502411f8e100cf3b7921e"}, + {file = "hiredis-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a57a4a33a78e94618d026fc68e853d3f71fa4a1d4da7a6e828e927819b001f2d"}, + {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:209b94fa473b39e174b665186cad73206ca849cf6e822900b761e83080f67b06"}, + {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:58e51d83b42fdcc29780897641b1dcb30c0e4d3c4f6d9d71d79b2cfec99b8eb7"}, + {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:706995fb1173fab7f12110fbad00bb95dd0453336f7f0b341b4ca7b1b9ff0bc7"}, + {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812e27a9b20db967f942306267bcd8b1369d7c171831b6f45d22d75576cd01cd"}, + {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69c32d54ac1f6708145c77d79af12f7448ca1025a0bf912700ad1f0be511026a"}, + {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:96745c4cdca261a50bd70c01f14c6c352a48c4d6a78e2d422040fba7919eadef"}, + {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:943631a49d7746cd413acaf0b712d030a15f02671af94c54759ba3144351f97a"}, + {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:796b616478a5c1cac83e9e10fcd803e746e5a02461bfa7767aebae8b304e2124"}, + {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:341952a311654c39433c1e0d8d31c2a0c5864b2675ed159ed264ecaa5cfb225b"}, + {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6fbb1a56d455602bd6c276d5c316ae245111b2dc8158355112f2d905e7471c85"}, + {file = "hiredis-2.2.1-cp310-cp310-win32.whl", hash = "sha256:14f67987e1d55b197e46729d1497019228ad8c94427bb63500e6f217aa586ca5"}, + {file = "hiredis-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:ea011b3bfa37f2746737860c1e5ba198b63c9b4764e40b042aac7bd2c258938f"}, + {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:103bde304d558061c4ba1d7ff94351e761da753c28883fd68964f25080152dac"}, + {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6ba9f425739a55e1409fda5dafad7fdda91c6dcd2b111ba93bb7b53d90737506"}, + {file = "hiredis-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cb59a7535e0b8373f694ce87576c573f533438c5fbee450193333a22118f4a98"}, + {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afbddc82bbb2c4c405d9a49a056ffe6541f8ad3160df49a80573b399f94ba3a"}, + {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a386f00800b1b043b091b93850e02814a8b398952438a9d4895bd70f5c80a821"}, + {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fec7465caac7b0a36551abb37066221cabf59f776d78fdd58ff17669942b4b41"}, + {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:5cd590dd7858d0107c37b438aa27bbcaa0ba77c5b8eda6ebab7acff0aa89f7d7"}, + {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1523ec56d711bee863aaaf4325cef4430da3143ec388e60465f47e28818016cd"}, + {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d4f6bbe599d255a504ef789c19e23118c654d256343c1ecdf7042fb4b4d0f7fa"}, + {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d77dbc13d55c1d45d6a203da910002fffd13fa310af5e9c5994959587a192789"}, + {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b2b847ea3f9af99e02c4c58b7cc6714e105c8d73705e5ff1132e9a249391f688"}, + {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:18135ecf28fc6577e71c0f8d8eb2f31e4783020a7d455571e7e5d2793374ce20"}, + {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:724aed63871bc386d6f28b5f4d15490d84934709f093e021c4abb785e72db5db"}, + {file = "hiredis-2.2.1-cp311-cp311-win32.whl", hash = "sha256:497a8837984ddfbf6f5a4c034c0107f2c5aaaebeebf34e2c6ab591acffce5f12"}, + {file = "hiredis-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:1776db8af168b22588ec10c3df674897b20cc6d25f093cd2724b8b26d7dac057"}, + {file = "hiredis-2.2.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:49a518b456403602775218062a4dd06bed42b26854ff1ff6784cfee2ef6fa347"}, + {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02118dc8545e2371448b9983a0041f12124eea907eb61858f2be8e7c1dfa1e43"}, + {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78f2a53149b116e0088f6eda720574f723fbc75189195aab8a7a2a591ca89cab"}, + {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e3b8f0eba6d88c2aec63e6d1e38960f8a25c01f9796d32993ffa1cfcf48744c"}, + {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38270042f40ed9e576966c603d06c984c80364b0d9ec86962a31551dae27b0cd"}, + {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a11250dd0521e9f729325b19ce9121df4cbb80ad3468cc21e56803e8380bc4b"}, + {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:595474e6c25f1c3c8ec67d587188e7dd47c492829b2c7c5ba1b17ee9e7e9a9ea"}, + {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8ad00a7621de8ef9ae1616cf24a53d48ad1a699b96668637559a8982d109a800"}, + {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a5e5e51faa7cd02444d4ee1eb59e316c08e974bcfa3a959cb790bc4e9bb616c5"}, + {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:0a9493bbc477436a3725e99cfcba768f416ab70ab92956e373d1a3b480b1e204"}, + {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:231e5836579fc75b25c6f9bb6213950ea3d39aadcfeb7f880211ca55df968342"}, + {file = "hiredis-2.2.1-cp37-cp37m-win32.whl", hash = "sha256:2ed6c948648798b440a9da74db65cdd2ad22f38cf4687f5212df369031394591"}, + {file = "hiredis-2.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:c65f38418e35970d44f9b5a59533f0f60f14b9f91b712dba51092d2c74d4dcd1"}, + {file = "hiredis-2.2.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:2f6e80fb7cd4cc61af95ab2875801e4c36941a956c183297c3273cbfbbefa9d3"}, + {file = "hiredis-2.2.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:a54d2b3328a2305e0dfb257a4545053fdc64df0c64e0635982e191c846cc0456"}, + {file = "hiredis-2.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:33624903dfb629d6f7c17ed353b4b415211c29fd447f31e6bf03361865b97e68"}, + {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f4b92df1e69dc48411045d2117d1d27ec6b5f0dd2b6501759cea2f6c68d5618"}, + {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03c6a1f6bf2f64f40d076c997cdfcb8b3d1c9557dda6cb7bbad2c5c839921726"}, + {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af3071d33432960cba88ce4e4932b508ab3e13ce41431c2a1b2dc9a988f7627"}, + {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb3f56d371b560bf39fe45d29c24e3d819ae2399733e2c86394a34e76adab38"}, + {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5da26970c41084a2ac337a4f075301a78cffb0e0f3df5e98c3049fc95e10725c"}, + {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d87f90064106dfd7d2cc7baeb007a8ca289ee985f4bf64bb627c50cdc34208ed"}, + {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c233199b9f4dd43e2297577e32ba5fcd0378871a47207bc424d5e5344d030a3e"}, + {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:99b5bcadd5e029234f89d244b86bc8d21093be7ac26111068bebd92a4a95dc73"}, + {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ed79f65098c4643cb6ec4530b337535f00b58ea02e25180e3df15e9cc9da58dc"}, + {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c7fd6394779c9a3b324b65394deadb949311662f3770bd34f904b8c04328082c"}, + {file = "hiredis-2.2.1-cp38-cp38-win32.whl", hash = "sha256:bde0178e7e6c49e408b8d3a8c0ec8e69a23e8dc2ae29f87af2d74b21025385dc"}, + {file = "hiredis-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:6f5f469ba5ae613e4c652cdedfc723aa802329fcc2d65df1e9ab0ac0de34ad9e"}, + {file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:e5945ef29a76ab792973bef1ffa2970d81dd22edb94dfa5d6cba48beb9f51962"}, + {file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bad6e9a0e31678ee15ac3ef72e77c08177c86df05c37d2423ff3cded95131e51"}, + {file = "hiredis-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e57dfcd72f036cce9eab77bc533a932444459f7e54d96a555d25acf2501048be"}, + {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3afc76a012b907895e679d1e6bcc6394845d0cc91b75264711f8caf53d7b0f37"}, + {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a99c0d50d1a31be285c83301eff4b911dca16aac1c3fe1875c7d6f517a1e9fc4"}, + {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8849bc74473778c10377f82cf9a534e240734e2f9a92c181ef6d51b4e3d3eb2"}, + {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e199868fe78c2d175bbb7b88f5daf2eae4a643a62f03f8d6736f9832f04f88b"}, + {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a0e98106a28fabb672bb014f6c4506cc67491e4cf9ac56d189cbb1e81a9a3e68"}, + {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0f2607e08dcb1c5d1e925c451facbfc357927acaa336a004552c32a6dd68e050"}, + {file = 
"hiredis-2.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:954abb363ed1d18dfb7510dbd89402cb7c21106307e04e2ee7bccf35a134f4dd"}, + {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0474ab858f5dd15be6b467d89ec14b4c287f53b55ca5455369c3a1a787ef3a24"}, + {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b90dd0adb1d659f8c94b32556198af1e61e38edd27fc7434d4b3b68ad4e51d37"}, + {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a5dac3ae05bc64b233f950edf37dce9c904aedbc7e18cfc2adfb98edb85da46"}, + {file = "hiredis-2.2.1-cp39-cp39-win32.whl", hash = "sha256:19666eb154b7155d043bf941e50d1640125f92d3294e2746df87639cc44a10e6"}, + {file = "hiredis-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:c702dd28d52656bb86f7a2a76ea9341ac434810871b51fcd6cd28c6d7490fbdf"}, + {file = "hiredis-2.2.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c604919bba041e4c4708ecb0fe6c7c8a92a7f1e886b0ae8d2c13c3e4abfc5eda"}, + {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04c972593f26f4769e2be7058b7928179337593bcfc6a8b6bda87eea807b7cbf"}, + {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42504e4058246536a9f477f450ab21275126fc5f094be5d5e5290c6de9d855f9"}, + {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220b6ac9d3fce60d14ccc34f9790e20a50dc56b6fb747fc357600963c0cf6aca"}, + {file = "hiredis-2.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a16d81115128e6a9fc6904de051475be195f6c460c9515583dccfd407b16ff78"}, + {file = "hiredis-2.2.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:df6325aade17b1f86c8b87f6a1d9549a4184fda00e27e2fca0e5d2a987130365"}, + {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcad9c9239845b29f149a895e7e99b8307889cecbfc37b69924c2dad1f4ae4e8"}, + {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ccf6fc116795d76bca72aa301a33874c507f9e77402e857d298c73419b5ea3"}, + {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63f941e77c024be2a1451089e2fdbd5ff450ff0965f49948bbeb383aef1799ea"}, + {file = "hiredis-2.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2bb682785a37145b209f44f5d5290b0f9f4b56205542fc592d0f1b3d5ffdfcf0"}, + {file = "hiredis-2.2.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8fe289556264cb1a2efbcd3d6b3c55e059394ad01b6afa88151264137f85c352"}, + {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96b079c53b6acd355edb6fe615270613f3f7ddc4159d69837ce15ec518925c40"}, + {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82ad46d1140c5779cd9dfdafc35f47dd09dadff7654d8001c50bb283da82e7c9"}, + {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17e9f363db56a8edb4eff936354cfa273197465bcd970922f3d292032eca87b0"}, + {file = "hiredis-2.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ae6b356ed166a0ec663a46b547c988815d2b0e5f2d0af31ef34a16cf3ce705d0"}, + {file = "hiredis-2.2.1.tar.gz", hash = "sha256:d9fbef7f9070055a7cc012ac965e3dbabbf2400b395649ea8d6016dc82a7d13a"}, ] [[package]] From 96e67d5cba515dcd34add4a4bf394df852710168 Mon 
Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 11:34:01 +0000 Subject: [PATCH 121/344] Bump types-setuptools from 65.6.0.3 to 67.1.0.0 (#14994) * Bump types-setuptools from 65.6.0.3 to 67.1.0.0 Bumps [types-setuptools](https://github.com/python/typeshed) from 65.6.0.3 to 67.1.0.0. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14994.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14994.misc diff --git a/changelog.d/14994.misc b/changelog.d/14994.misc new file mode 100644 index 000000000000..7ca29f1ca920 --- /dev/null +++ b/changelog.d/14994.misc @@ -0,0 +1 @@ +Bump types-setuptools from 65.6.0.3 to 67.1.0.0. diff --git a/poetry.lock b/poetry.lock index 9170f81ac591..68e00a725e4d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2712,14 +2712,14 @@ types-urllib3 = "<1.27" [[package]] name = "types-setuptools" -version = "65.6.0.3" +version = "67.1.0.0" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-65.6.0.3.tar.gz", hash = "sha256:7ddd7415282fa97ab18e490206067c0cdb126b103743e72ee86783d7af6481c5"}, - {file = "types_setuptools-65.6.0.3-py3-none-any.whl", hash = "sha256:ad729fc3a9a3946f73915eaab16ce56b30ed5ae998479253d809d76b3889ee09"}, + {file = "types-setuptools-67.1.0.0.tar.gz", hash = "sha256:162a39d22e3a5eb802197c84f16b19e798101bbd33d9437837fbb45627da5627"}, + {file = "types_setuptools-67.1.0.0-py3-none-any.whl", hash = "sha256:5bd7a10d93e468bfcb10d24cb8ea5e12ac4f4ac91267293959001f1448cf0619"}, ] [package.dependencies] From ef23d6b296da86e3552b31342dc9f3dd276fda93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 11:34:22 +0000 Subject: [PATCH 122/344] Bump prometheus-client from 0.15.0 to 0.16.0 (#14995) * Bump prometheus-client from 0.15.0 to 0.16.0 Bumps [prometheus-client](https://github.com/prometheus/client_python) from 0.15.0 to 0.16.0. - [Release notes](https://github.com/prometheus/client_python/releases) - [Commits](https://github.com/prometheus/client_python/compare/v0.15.0...v0.16.0) --- updated-dependencies: - dependency-name: prometheus-client dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14995.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14995.misc diff --git a/changelog.d/14995.misc b/changelog.d/14995.misc new file mode 100644 index 000000000000..44af2821a641 --- /dev/null +++ b/changelog.d/14995.misc @@ -0,0 +1 @@ +Bump prometheus-client from 0.15.0 to 0.16.0. 
diff --git a/poetry.lock b/poetry.lock index 68e00a725e4d..26b9ce06501d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1436,14 +1436,14 @@ test = ["appdirs (==1.4.4)", "pytest (>=6)", "pytest-cov (>=2.7)", "pytest-mock [[package]] name = "prometheus-client" -version = "0.15.0" +version = "0.16.0" description = "Python client for the Prometheus monitoring system." category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "prometheus_client-0.15.0-py3-none-any.whl", hash = "sha256:db7c05cbd13a0f79975592d112320f2605a325969b270a94b71dcabc47b931d2"}, - {file = "prometheus_client-0.15.0.tar.gz", hash = "sha256:be26aa452490cfcf6da953f9436e95a9f2b4d578ca80094b4458930e5f584ab1"}, + {file = "prometheus_client-0.16.0-py3-none-any.whl", hash = "sha256:0836af6eb2c8f4fed712b2f279f6c0a8bbab29f9f4aa15276b91c7cb0d1616ab"}, + {file = "prometheus_client-0.16.0.tar.gz", hash = "sha256:a03e35b359f14dd1630898543e2120addfdeacd1a6069c1367ae90fd93ad3f48"}, ] [package.extras] From 041eab647dbefb21cbabf40fbba2c61cdce8ebfd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 11:34:54 +0000 Subject: [PATCH 123/344] Bump serde_json from 1.0.91 to 1.0.92 (#14997) * Bump serde_json from 1.0.91 to 1.0.92 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.91 to 1.0.92. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.91...v1.0.92) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 4 ++-- changelog.d/14997.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14997.misc diff --git a/Cargo.lock b/Cargo.lock index 079a3f854e75..d91d0baa1413 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,9 +343,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" +checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" dependencies = [ "itoa", "ryu", diff --git a/changelog.d/14997.misc b/changelog.d/14997.misc new file mode 100644 index 000000000000..2ae4582dabd4 --- /dev/null +++ b/changelog.d/14997.misc @@ -0,0 +1 @@ +Bump serde_json from 1.0.91 to 1.0.92. From 4e2b58bc52fb2ec02d53b8c569f3e21f0cc0ad5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 11:35:06 +0000 Subject: [PATCH 124/344] Bump isort from 5.11.4 to 5.11.5 (#14998) * Bump isort from 5.11.4 to 5.11.5 Bumps [isort](https://github.com/pycqa/isort) from 5.11.4 to 5.11.5. - [Release notes](https://github.com/pycqa/isort/releases) - [Changelog](https://github.com/PyCQA/isort/blob/main/CHANGELOG.md) - [Commits](https://github.com/pycqa/isort/compare/5.11.4...5.11.5) --- updated-dependencies: - dependency-name: isort dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14998.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14998.misc diff --git a/changelog.d/14998.misc b/changelog.d/14998.misc new file mode 100644 index 000000000000..f191ae22ac51 --- /dev/null +++ b/changelog.d/14998.misc @@ -0,0 +1 @@ +Bump isort from 5.11.4 to 5.11.5. diff --git a/poetry.lock b/poetry.lock index 26b9ce06501d..390c67fe91f2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -770,19 +770,19 @@ scripts = ["click (>=6.0)", "twisted (>=16.4.0)"] [[package]] name = "isort" -version = "5.11.4" +version = "5.11.5" description = "A Python utility / library to sort Python imports." category = "dev" optional = false python-versions = ">=3.7.0" files = [ - {file = "isort-5.11.4-py3-none-any.whl", hash = "sha256:c033fd0edb91000a7f09527fe5c75321878f98322a77ddcc81adbd83724afb7b"}, - {file = "isort-5.11.4.tar.gz", hash = "sha256:6db30c5ded9815d813932c04c2f85a360bcdd35fed496f4d8f35495ef0a261b6"}, + {file = "isort-5.11.5-py3-none-any.whl", hash = "sha256:ba1d72fb2595a01c7895a5128f9585a5cc4b6d395f1c8d514989b9a7eb2a8746"}, + {file = "isort-5.11.5.tar.gz", hash = "sha256:6be1f76a507cb2ecf16c7cf14a37e41609ca082330be4e3436a18ef74add55db"}, ] [package.extras] colors = ["colorama (>=0.4.3,<0.5.0)"] -pipfile-deprecated-finder = ["pipreqs", "requirementslib"] +pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] plugins = ["setuptools"] requirements-deprecated-finder = ["pip-api", "pipreqs"] From e3808e53dc3815e1998929336d103140980d03ae Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 11:35:39 +0000 Subject: [PATCH 125/344] Bump phonenumbers from 8.13.4 to 8.13.5 (#14999) * Bump phonenumbers from 8.13.4 to 8.13.5 Bumps [phonenumbers](https://github.com/daviddrysdale/python-phonenumbers) from 8.13.4 to 8.13.5. - [Release notes](https://github.com/daviddrysdale/python-phonenumbers/releases) - [Commits](https://github.com/daviddrysdale/python-phonenumbers/compare/v8.13.4...v8.13.5) --- updated-dependencies: - dependency-name: phonenumbers dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/14999.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14999.misc diff --git a/changelog.d/14999.misc b/changelog.d/14999.misc new file mode 100644 index 000000000000..f03f9239e825 --- /dev/null +++ b/changelog.d/14999.misc @@ -0,0 +1 @@ +Bump phonenumbers from 8.13.4 to 8.13.5. diff --git a/poetry.lock b/poetry.lock index 390c67fe91f2..71095c21ed9d 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1290,14 +1290,14 @@ files = [ [[package]] name = "phonenumbers" -version = "8.13.4" +version = "8.13.5" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
category = "main" optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.4-py2.py3-none-any.whl", hash = "sha256:a577a46c069ad889c7b7cf4dd978751d059edeab28b97acead4775d2ea1fc70a"}, - {file = "phonenumbers-8.13.4.tar.gz", hash = "sha256:6d63455012fc9431105ffc7739befca61c3efc551b287dca58d2be2e745475a9"}, + {file = "phonenumbers-8.13.5-py2.py3-none-any.whl", hash = "sha256:2e3fd1f3fde226b289489275517c76edf223eafd9f43a2c2c36498a44b73d4b0"}, + {file = "phonenumbers-8.13.5.tar.gz", hash = "sha256:6eb2faf29c19f946baf10f1c977a1f856cab90819fe7735b8e141d5407420c4a"}, ] [[package]] From 3e37ff1a7e0ee7f8e64976a6e166e7d3f507c9f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Feb 2023 12:18:11 +0000 Subject: [PATCH 126/344] Bump anyhow from 1.0.68 to 1.0.69 (#14996) * Bump anyhow from 1.0.68 to 1.0.69 Bumps [anyhow](https://github.com/dtolnay/anyhow) from 1.0.68 to 1.0.69. - [Release notes](https://github.com/dtolnay/anyhow/releases) - [Commits](https://github.com/dtolnay/anyhow/compare/1.0.68...1.0.69) --- updated-dependencies: - dependency-name: anyhow dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 4 ++-- changelog.d/14996.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14996.misc diff --git a/Cargo.lock b/Cargo.lock index d91d0baa1413..a9219eac11e3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.68" +version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" +checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" [[package]] name = "arc-swap" diff --git a/changelog.d/14996.misc b/changelog.d/14996.misc new file mode 100644 index 000000000000..7ccef81e8288 --- /dev/null +++ b/changelog.d/14996.misc @@ -0,0 +1 @@ +Bump anyhow from 1.0.68 to 1.0.69. From e8269ed391a199bbe0e43efc28c68c98b949b323 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 6 Feb 2023 12:49:06 +0000 Subject: [PATCH 127/344] Type hints for tests.appservice (#14990) * Accept a Sequence of events in synapse.appservice This avoids some casts/ignores in the tests I'm about to fixup. It seems that `List[Mock]` is not a subtype of `List[EventBase]`, but `Sequence[Mock]` is a subtype of `Sequence[EventBase]`. So presumably `Mock` is considered a subtype of anything, much like `Any`. 
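Illustrative sketch, not part of the patch: the Sequence-versus-List observation above is consistent with variance rules in typing — List is invariant in its element type, while Sequence is covariant, so a sequence of a subtype is accepted where a sequence of the base type is expected. A minimal, self-contained example using stand-in classes (not Synapse's or Mock's actual types):

from typing import List, Sequence


class Base:
    pass


class Derived(Base):
    pass


def takes_list(items: List[Base]) -> None:
    """Invariant parameter: mypy rejects a List[Derived] argument here."""


def takes_sequence(items: Sequence[Base]) -> None:
    """Covariant parameter: mypy accepts a Sequence[Derived] argument here."""


derived_items: List[Derived] = [Derived(), Derived()]
takes_sequence(derived_items)  # accepted: Sequence[Derived] is a Sequence[Base]
takes_list(derived_items)      # mypy error: List is invariant, so List[Derived] is not List[Base]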
* make tests.appservice.test_scheduler pass mypy * Extra hints in tests.appservice.test_scheduler * Extra hints in tests.appservice.test_api * Extra hints in tests.appservice.test_appservice * Disallow untyped defs * Changelog --- changelog.d/14990.misc | 1 + mypy.ini | 4 +- synapse/appservice/__init__.py | 4 +- synapse/appservice/api.py | 14 ++- synapse/appservice/scheduler.py | 3 +- synapse/storage/databases/main/appservice.py | 14 ++- tests/appservice/test_api.py | 4 +- tests/appservice/test_appservice.py | 55 ++++++++---- tests/appservice/test_scheduler.py | 92 +++++++++++++------- 9 files changed, 132 insertions(+), 59 deletions(-) create mode 100644 changelog.d/14990.misc diff --git a/changelog.d/14990.misc b/changelog.d/14990.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14990.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index a6e37bc3775a..351b8ccade20 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,7 +32,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/schema/ - |tests/appservice/test_scheduler.py |tests/federation/test_federation_catch_up.py |tests/federation/test_federation_sender.py |tests/http/federation/test_matrix_federation_agent.py @@ -78,6 +77,9 @@ disallow_untyped_defs = True [mypy-tests.app.*] disallow_untyped_defs = True +[mypy-tests.appservice.*] +disallow_untyped_defs = True + [mypy-tests.config.*] disallow_untyped_defs = True diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index 65615f50b8be..35c330a3c4af 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -16,7 +16,7 @@ import logging import re from enum import Enum -from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Pattern +from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Pattern, Sequence import attr from netaddr import IPSet @@ -377,7 +377,7 @@ def __init__( self, service: ApplicationService, id: int, - events: List[EventBase], + events: Sequence[EventBase], ephemeral: List[JsonDict], to_device_messages: List[JsonDict], one_time_keys_count: TransactionOneTimeKeysCount, diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index edafd433cda3..1a6f69e7d3f3 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -14,7 +14,17 @@ # limitations under the License. 
import logging import urllib.parse -from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + Mapping, + Optional, + Sequence, + Tuple, +) from prometheus_client import Counter from typing_extensions import TypeGuard @@ -259,7 +269,7 @@ async def _get() -> Optional[JsonDict]: async def push_bulk( self, service: "ApplicationService", - events: List[EventBase], + events: Sequence[EventBase], ephemeral: List[JsonDict], to_device_messages: List[JsonDict], one_time_keys_count: TransactionOneTimeKeysCount, diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 7b562795a3f2..3a319b0d42d9 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -57,6 +57,7 @@ Iterable, List, Optional, + Sequence, Set, Tuple, ) @@ -364,7 +365,7 @@ def __init__(self, clock: Clock, store: DataStore, as_api: ApplicationServiceApi async def send( self, service: ApplicationService, - events: List[EventBase], + events: Sequence[EventBase], ephemeral: Optional[List[JsonDict]] = None, to_device_messages: Optional[List[JsonDict]] = None, one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None, diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index c2c8018ee255..5fb152c4ff07 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -14,7 +14,17 @@ # limitations under the License. import logging import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Pattern, Tuple, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + List, + Optional, + Pattern, + Sequence, + Tuple, + cast, +) from synapse.appservice import ( ApplicationService, @@ -257,7 +267,7 @@ async def set_appservice_state( async def create_appservice_txn( self, service: ApplicationService, - events: List[EventBase], + events: Sequence[EventBase], ephemeral: List[JsonDict], to_device_messages: List[JsonDict], one_time_keys_count: TransactionOneTimeKeysCount, diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 89ee79396fb5..9d183b733ec2 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -29,7 +29,7 @@ class ApplicationServiceApiTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.api = hs.get_application_service_api() self.service = ApplicationService( id="unique_identifier", @@ -39,7 +39,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): hs_token=TOKEN, ) - def test_query_3pe_authenticates_token(self): + def test_query_3pe_authenticates_token(self) -> None: """ Tests that 3pe queries to the appservice are authenticated with the appservice's token. diff --git a/tests/appservice/test_appservice.py b/tests/appservice/test_appservice.py index d4dccfc2f070..dee976356faa 100644 --- a/tests/appservice/test_appservice.py +++ b/tests/appservice/test_appservice.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import re +from typing import Generator from unittest.mock import Mock from twisted.internet import defer @@ -27,7 +28,7 @@ def _regex(regex: str, exclusive: bool = True) -> Namespace: class ApplicationServiceTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.service = ApplicationService( id="unique_identifier", sender="@as:test", @@ -46,7 +47,9 @@ def setUp(self): self.store.get_local_users_in_room = simple_async_mock([]) @defer.inlineCallbacks - def test_regex_user_id_prefix_match(self): + def test_regex_user_id_prefix_match( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@irc_foobar:matrix.org" self.assertTrue( @@ -60,7 +63,9 @@ def test_regex_user_id_prefix_match(self): ) @defer.inlineCallbacks - def test_regex_user_id_prefix_no_match(self): + def test_regex_user_id_prefix_no_match( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.assertFalse( @@ -74,7 +79,9 @@ def test_regex_user_id_prefix_no_match(self): ) @defer.inlineCallbacks - def test_regex_room_member_is_checked(self): + def test_regex_room_member_is_checked( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) self.event.sender = "@someone_else:matrix.org" self.event.type = "m.room.member" @@ -90,7 +97,9 @@ def test_regex_room_member_is_checked(self): ) @defer.inlineCallbacks - def test_regex_room_id_match(self): + def test_regex_room_id_match( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) @@ -106,7 +115,9 @@ def test_regex_room_id_match(self): ) @defer.inlineCallbacks - def test_regex_room_id_no_match(self): + def test_regex_room_id_no_match( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!some_prefix.*some_suffix:matrix.org") ) @@ -122,7 +133,9 @@ def test_regex_room_id_no_match(self): ) @defer.inlineCallbacks - def test_regex_alias_match(self): + def test_regex_alias_match( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -140,44 +153,46 @@ def test_regex_alias_match(self): ) ) - def test_non_exclusive_alias(self): + def test_non_exclusive_alias(self) -> None: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org", exclusive=False) ) self.assertFalse(self.service.is_exclusive_alias("#irc_foobar:matrix.org")) - def test_non_exclusive_room(self): + def test_non_exclusive_room(self) -> None: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!irc_.*:matrix.org", exclusive=False) ) self.assertFalse(self.service.is_exclusive_room("!irc_foobar:matrix.org")) - def test_non_exclusive_user(self): + def test_non_exclusive_user(self) -> None: self.service.namespaces[ApplicationService.NS_USERS].append( _regex("@irc_.*:matrix.org", exclusive=False) ) self.assertFalse(self.service.is_exclusive_user("@irc_foobar:matrix.org")) - def test_exclusive_alias(self): + def test_exclusive_alias(self) -> None: self.service.namespaces[ApplicationService.NS_ALIASES].append( 
_regex("#irc_.*:matrix.org", exclusive=True) ) self.assertTrue(self.service.is_exclusive_alias("#irc_foobar:matrix.org")) - def test_exclusive_user(self): + def test_exclusive_user(self) -> None: self.service.namespaces[ApplicationService.NS_USERS].append( _regex("@irc_.*:matrix.org", exclusive=True) ) self.assertTrue(self.service.is_exclusive_user("@irc_foobar:matrix.org")) - def test_exclusive_room(self): + def test_exclusive_room(self) -> None: self.service.namespaces[ApplicationService.NS_ROOMS].append( _regex("!irc_.*:matrix.org", exclusive=True) ) self.assertTrue(self.service.is_exclusive_room("!irc_foobar:matrix.org")) @defer.inlineCallbacks - def test_regex_alias_no_match(self): + def test_regex_alias_no_match( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -196,7 +211,9 @@ def test_regex_alias_no_match(self): ) @defer.inlineCallbacks - def test_regex_multiple_matches(self): + def test_regex_multiple_matches( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_ALIASES].append( _regex("#irc_.*:matrix.org") ) @@ -215,7 +232,9 @@ def test_regex_multiple_matches(self): ) @defer.inlineCallbacks - def test_interested_in_self(self): + def test_interested_in_self( + self, + ) -> Generator["defer.Deferred[object]", object, None]: # make sure invites get through self.service.sender = "@appservice:name" self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) @@ -233,7 +252,9 @@ def test_interested_in_self(self): ) @defer.inlineCallbacks - def test_member_list_match(self): + def test_member_list_match( + self, + ) -> Generator["defer.Deferred[object]", object, None]: self.service.namespaces[ApplicationService.NS_USERS].append(_regex("@irc_.*")) # Note that @irc_fo:here is the AS user. self.store.get_local_users_in_room = simple_async_mock( diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index 0a1ae83a2bbf..febcc1499d06 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -11,20 +11,28 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, cast from unittest.mock import Mock +from typing_extensions import TypeAlias + from twisted.internet import defer -from synapse.appservice import ApplicationServiceState +from synapse.appservice import ( + ApplicationService, + ApplicationServiceState, + TransactionOneTimeKeysCount, + TransactionUnusedFallbackKeys, +) from synapse.appservice.scheduler import ( ApplicationServiceScheduler, _Recoverer, _TransactionController, ) +from synapse.events import EventBase from synapse.logging.context import make_deferred_yieldable from synapse.server import HomeServer -from synapse.types import DeviceListUpdates +from synapse.types import DeviceListUpdates, JsonDict from synapse.util import Clock from tests import unittest @@ -37,18 +45,18 @@ class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.clock = MockClock() self.store = Mock() self.as_api = Mock() self.recoverer = Mock() self.recoverer_fn = Mock(return_value=self.recoverer) self.txnctrl = _TransactionController( - clock=self.clock, store=self.store, as_api=self.as_api + clock=cast(Clock, self.clock), store=self.store, as_api=self.as_api ) self.txnctrl.RECOVERER_CLASS = self.recoverer_fn - def test_single_service_up_txn_sent(self): + def test_single_service_up_txn_sent(self) -> None: # Test: The AS is up and the txn is successfully sent. service = Mock() events = [Mock(), Mock()] @@ -76,7 +84,7 @@ def test_single_service_up_txn_sent(self): self.assertEqual(0, len(self.txnctrl.recoverers)) # no recoverer made txn.complete.assert_called_once_with(self.store) # txn completed - def test_single_service_down(self): + def test_single_service_down(self) -> None: # Test: The AS is down so it shouldn't push; Recoverers will do it. # It should still make a transaction though. service = Mock() @@ -103,7 +111,7 @@ def test_single_service_down(self): self.assertEqual(0, txn.send.call_count) # txn not sent though self.assertEqual(0, txn.complete.call_count) # or completed - def test_single_service_up_txn_not_sent(self): + def test_single_service_up_txn_not_sent(self) -> None: # Test: The AS is up and the txn is not sent. A Recoverer is made and # started. 
service = Mock() @@ -139,26 +147,28 @@ def test_single_service_up_txn_not_sent(self): class ApplicationServiceSchedulerRecovererTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.clock = MockClock() self.as_api = Mock() self.store = Mock() self.service = Mock() self.callback = simple_async_mock() self.recoverer = _Recoverer( - clock=self.clock, + clock=cast(Clock, self.clock), as_api=self.as_api, store=self.store, service=self.service, callback=self.callback, ) - def test_recover_single_txn(self): + def test_recover_single_txn(self) -> None: txn = Mock() # return one txn to send, then no more old txns txns = [txn, None] - def take_txn(*args, **kwargs): + def take_txn( + *args: object, **kwargs: object + ) -> "defer.Deferred[Optional[Mock]]": return defer.succeed(txns.pop(0)) self.store.get_oldest_unsent_txn = Mock(side_effect=take_txn) @@ -177,12 +187,14 @@ def take_txn(*args, **kwargs): self.callback.assert_called_once_with(self.recoverer) self.assertEqual(self.recoverer.service, self.service) - def test_recover_retry_txn(self): + def test_recover_retry_txn(self) -> None: txn = Mock() txns = [txn, None] pop_txn = False - def take_txn(*args, **kwargs): + def take_txn( + *args: object, **kwargs: object + ) -> "defer.Deferred[Optional[Mock]]": if pop_txn: return defer.succeed(txns.pop(0)) else: @@ -214,8 +226,24 @@ def take_txn(*args, **kwargs): self.callback.assert_called_once_with(self.recoverer) +# Corresponds to synapse.appservice.scheduler._TransactionController.send +TxnCtrlArgs: TypeAlias = """ +defer.Deferred[ + Tuple[ + ApplicationService, + Sequence[EventBase], + Optional[List[JsonDict]], + Optional[List[JsonDict]], + Optional[TransactionOneTimeKeysCount], + Optional[TransactionUnusedFallbackKeys], + Optional[DeviceListUpdates], + ] +] +""" + + class ApplicationServiceSchedulerQueuerTestCase(unittest.HomeserverTestCase): - def prepare(self, reactor: "MemoryReactor", clock: Clock, hs: HomeServer): + def prepare(self, reactor: "MemoryReactor", clock: Clock, hs: HomeServer) -> None: self.scheduler = ApplicationServiceScheduler(hs) self.txn_ctrl = Mock() self.txn_ctrl.send = simple_async_mock() @@ -224,7 +252,7 @@ def prepare(self, reactor: "MemoryReactor", clock: Clock, hs: HomeServer): self.scheduler.txn_ctrl = self.txn_ctrl self.scheduler.queuer.txn_ctrl = self.txn_ctrl - def test_send_single_event_no_queue(self): + def test_send_single_event_no_queue(self) -> None: # Expect the event to be sent immediately. service = Mock(id=4) event = Mock() @@ -233,8 +261,8 @@ def test_send_single_event_no_queue(self): service, [event], [], [], None, None, DeviceListUpdates() ) - def test_send_single_event_with_queue(self): - d = defer.Deferred() + def test_send_single_event_with_queue(self) -> None: + d: TxnCtrlArgs = defer.Deferred() self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) service = Mock(id=4) event = Mock(event_id="first") @@ -257,22 +285,22 @@ def test_send_single_event_with_queue(self): ) self.assertEqual(2, self.txn_ctrl.send.call_count) - def test_multiple_service_queues(self): + def test_multiple_service_queues(self) -> None: # Tests that each service has its own queue, and that they don't block # on each other. 
srv1 = Mock(id=4) - srv_1_defer = defer.Deferred() + srv_1_defer: "defer.Deferred[EventBase]" = defer.Deferred() srv_1_event = Mock(event_id="srv1a") srv_1_event2 = Mock(event_id="srv1b") srv2 = Mock(id=6) - srv_2_defer = defer.Deferred() + srv_2_defer: "defer.Deferred[EventBase]" = defer.Deferred() srv_2_event = Mock(event_id="srv2a") srv_2_event2 = Mock(event_id="srv2b") send_return_list = [srv_1_defer, srv_2_defer] - def do_send(*args, **kwargs): + def do_send(*args: object, **kwargs: object) -> "defer.Deferred[EventBase]": return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) @@ -297,12 +325,12 @@ def do_send(*args, **kwargs): ) self.assertEqual(3, self.txn_ctrl.send.call_count) - def test_send_large_txns(self): - srv_1_defer = defer.Deferred() - srv_2_defer = defer.Deferred() + def test_send_large_txns(self) -> None: + srv_1_defer: "defer.Deferred[EventBase]" = defer.Deferred() + srv_2_defer: "defer.Deferred[EventBase]" = defer.Deferred() send_return_list = [srv_1_defer, srv_2_defer] - def do_send(*args, **kwargs): + def do_send(*args: object, **kwargs: object) -> "defer.Deferred[EventBase]": return make_deferred_yieldable(send_return_list.pop(0)) self.txn_ctrl.send = Mock(side_effect=do_send) @@ -328,7 +356,7 @@ def do_send(*args, **kwargs): ) self.assertEqual(3, self.txn_ctrl.send.call_count) - def test_send_single_ephemeral_no_queue(self): + def test_send_single_ephemeral_no_queue(self) -> None: # Expect the event to be sent immediately. service = Mock(id=4, name="service") event_list = [Mock(name="event")] @@ -337,7 +365,7 @@ def test_send_single_ephemeral_no_queue(self): service, [], event_list, [], None, None, DeviceListUpdates() ) - def test_send_multiple_ephemeral_no_queue(self): + def test_send_multiple_ephemeral_no_queue(self) -> None: # Expect the event to be sent immediately. service = Mock(id=4, name="service") event_list = [Mock(name="event1"), Mock(name="event2"), Mock(name="event3")] @@ -346,8 +374,8 @@ def test_send_multiple_ephemeral_no_queue(self): service, [], event_list, [], None, None, DeviceListUpdates() ) - def test_send_single_ephemeral_with_queue(self): - d = defer.Deferred() + def test_send_single_ephemeral_with_queue(self) -> None: + d: TxnCtrlArgs = defer.Deferred() self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) service = Mock(id=4) event_list_1 = [Mock(event_id="event1"), Mock(event_id="event2")] @@ -377,8 +405,8 @@ def test_send_single_ephemeral_with_queue(self): ) self.assertEqual(2, self.txn_ctrl.send.call_count) - def test_send_large_txns_ephemeral(self): - d = defer.Deferred() + def test_send_large_txns_ephemeral(self) -> None: + d: TxnCtrlArgs = defer.Deferred() self.txn_ctrl.send = Mock(return_value=make_deferred_yieldable(d)) # Expect the event to be sent immediately. service = Mock(id=4, name="service") From b275763c65dfa3c38432465c5bfba66a15c08040 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 6 Feb 2023 12:54:11 +0000 Subject: [PATCH 128/344] Expect type stubs from canonicaljson (#14992) * canonicaljson has stubs now since https://github.com/matrix-org/python-canonicaljson/pull/52 which is included in the lockfile version we use for type checking. 
* Changelog --- changelog.d/14992.misc | 1 + mypy.ini | 3 --- 2 files changed, 1 insertion(+), 3 deletions(-) create mode 100644 changelog.d/14992.misc diff --git a/changelog.d/14992.misc b/changelog.d/14992.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14992.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 351b8ccade20..e66e8f1ffb3b 100644 --- a/mypy.ini +++ b/mypy.ini @@ -142,9 +142,6 @@ disallow_untyped_defs = True [mypy-authlib.*] ignore_missing_imports = True -[mypy-canonicaljson] -ignore_missing_imports = True - [mypy-ijson.*] ignore_missing_imports = True From 156cd88eefe7db100e5cdba48174c709975b93ca Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 6 Feb 2023 09:55:00 -0500 Subject: [PATCH 129/344] Add missing type hints to tests.replication. (#14987) --- changelog.d/14987.misc | 1 + mypy.ini | 3 + tests/replication/_base.py | 70 ++++++++------- tests/replication/http/test__base.py | 2 +- tests/replication/slave/storage/_base.py | 25 ++++-- .../replication/slave/storage/test_events.py | 85 +++++++++---------- .../tcp/streams/test_account_data.py | 4 +- tests/replication/tcp/streams/test_events.py | 18 ++-- .../tcp/streams/test_federation.py | 2 +- .../tcp/streams/test_partial_state.py | 2 +- tests/replication/tcp/streams/test_typing.py | 33 +++---- tests/replication/tcp/test_commands.py | 6 +- .../replication/tcp/test_remote_server_up.py | 8 +- tests/replication/test_auth.py | 14 +-- tests/replication/test_client_reader_shard.py | 4 +- tests/replication/test_federation_ack.py | 12 +-- .../test_federation_sender_shard.py | 10 ++- .../test_module_cache_invalidation.py | 2 +- tests/replication/test_multi_media_repo.py | 16 ++-- tests/replication/test_pusher_shard.py | 11 ++- .../test_sharded_event_persister.py | 14 +-- 21 files changed, 193 insertions(+), 149 deletions(-) create mode 100644 changelog.d/14987.misc diff --git a/changelog.d/14987.misc b/changelog.d/14987.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14987.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index e66e8f1ffb3b..22f091145475 100644 --- a/mypy.ini +++ b/mypy.ini @@ -104,6 +104,9 @@ disallow_untyped_defs = True [mypy-tests.push.*] disallow_untyped_defs = True +[mypy-tests.replication.*] +disallow_untyped_defs = True + [mypy-tests.rest.*] disallow_untyped_defs = True diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 6a7174b333c9..46a8e2013e41 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -16,7 +16,9 @@ from typing import Any, Dict, List, Optional, Set, Tuple from twisted.internet.address import IPv4Address -from twisted.internet.protocol import Protocol +from twisted.internet.protocol import Protocol, connectionDone +from twisted.python.failure import Failure +from twisted.test.proto_helpers import MemoryReactor from twisted.web.resource import Resource from synapse.app.generic_worker import GenericWorkerServer @@ -30,6 +32,7 @@ ) from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest from tests.server import FakeTransport @@ -51,7 +54,7 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): if not hiredis: skip = "Requires hiredis" - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # build a replication server server_factory = ReplicationStreamProtocolFactory(hs) self.streamer = hs.get_replication_streamer() @@ -92,8 +95,8 @@ def prepare(self, reactor, clock, hs): repl_handler, ) - self._client_transport = None - self._server_transport = None + self._client_transport: Optional[FakeTransport] = None + self._server_transport: Optional[FakeTransport] = None def create_resource_dict(self) -> Dict[str, Resource]: d = super().create_resource_dict() @@ -107,10 +110,10 @@ def _get_worker_hs_config(self) -> dict: config["worker_replication_http_port"] = "8765" return config - def _build_replication_data_handler(self): + def _build_replication_data_handler(self) -> "TestReplicationDataHandler": return TestReplicationDataHandler(self.worker_hs) - def reconnect(self): + def reconnect(self) -> None: if self._client_transport: self.client.close() @@ -123,7 +126,7 @@ def reconnect(self): self._server_transport = FakeTransport(self.client, self.reactor) self.server.makeConnection(self._server_transport) - def disconnect(self): + def disconnect(self) -> None: if self._client_transport: self._client_transport = None self.client.close() @@ -132,7 +135,7 @@ def disconnect(self): self._server_transport = None self.server.close() - def replicate(self): + def replicate(self) -> None: """Tell the master side of replication that something has happened, and then wait for the replication to occur. """ @@ -168,7 +171,7 @@ def handle_http_replication_attempt(self) -> SynapseRequest: requests: List[SynapseRequest] = [] real_request_factory = channel.requestFactory - def request_factory(*args, **kwargs): + def request_factory(*args: Any, **kwargs: Any) -> SynapseRequest: request = real_request_factory(*args, **kwargs) requests.append(request) return request @@ -202,7 +205,7 @@ def request_factory(*args, **kwargs): def assert_request_is_get_repl_stream_updates( self, request: SynapseRequest, stream_name: str - ): + ) -> None: """Asserts that the given request is a HTTP replication request for fetching updates for given stream. 
""" @@ -244,7 +247,7 @@ def default_config(self) -> Dict[str, Any]: base["redis"] = {"enabled": True} return base - def setUp(self): + def setUp(self) -> None: super().setUp() # build a replication server @@ -287,7 +290,7 @@ def setUp(self): lambda: self._handle_http_replication_attempt(self.hs, 8765), ) - def create_test_resource(self): + def create_test_resource(self) -> ReplicationRestResource: """Overrides `HomeserverTestCase.create_test_resource`.""" # We override this so that it automatically registers all the HTTP # replication servlets, without having to explicitly do that in all @@ -301,7 +304,7 @@ def create_test_resource(self): return resource def make_worker_hs( - self, worker_app: str, extra_config: Optional[dict] = None, **kwargs + self, worker_app: str, extra_config: Optional[dict] = None, **kwargs: Any ) -> HomeServer: """Make a new worker HS instance, correctly connecting replcation stream to the master HS. @@ -385,14 +388,14 @@ def _get_worker_hs_config(self) -> dict: config["worker_replication_http_port"] = "8765" return config - def replicate(self): + def replicate(self) -> None: """Tell the master side of replication that something has happened, and then wait for the replication to occur. """ self.streamer.on_notifier_poke() self.pump() - def _handle_http_replication_attempt(self, hs, repl_port): + def _handle_http_replication_attempt(self, hs: HomeServer, repl_port: int) -> None: """Handles a connection attempt to the given HS replication HTTP listener on the given port. """ @@ -429,7 +432,7 @@ def _handle_http_replication_attempt(self, hs, repl_port): # inside `connecTCP` before the connection has been passed back to the # code that requested the TCP connection. - def connect_any_redis_attempts(self): + def connect_any_redis_attempts(self) -> None: """If redis is enabled we need to deal with workers connecting to a redis server. We don't want to use a real Redis server so we use a fake one. 
@@ -440,8 +443,11 @@ def connect_any_redis_attempts(self): self.assertEqual(host, "localhost") self.assertEqual(port, 6379) - client_protocol = client_factory.buildProtocol(None) - server_protocol = self._redis_server.buildProtocol(None) + client_address = IPv4Address("TCP", "127.0.0.1", 6379) + client_protocol = client_factory.buildProtocol(client_address) + + server_address = IPv4Address("TCP", host, port) + server_protocol = self._redis_server.buildProtocol(server_address) client_to_server_transport = FakeTransport( server_protocol, self.reactor, client_protocol @@ -463,7 +469,9 @@ def __init__(self, hs: HomeServer): # list of received (stream_name, token, row) tuples self.received_rdata_rows: List[Tuple[str, int, Any]] = [] - async def on_rdata(self, stream_name, instance_name, token, rows): + async def on_rdata( + self, stream_name: str, instance_name: str, token: int, rows: list + ) -> None: await super().on_rdata(stream_name, instance_name, token, rows) for r in rows: self.received_rdata_rows.append((stream_name, token, r)) @@ -472,28 +480,30 @@ async def on_rdata(self, stream_name, instance_name, token, rows): class FakeRedisPubSubServer: """A fake Redis server for pub/sub.""" - def __init__(self): + def __init__(self) -> None: self._subscribers_by_channel: Dict[ bytes, Set["FakeRedisPubSubProtocol"] ] = defaultdict(set) - def add_subscriber(self, conn, channel: bytes): + def add_subscriber(self, conn: "FakeRedisPubSubProtocol", channel: bytes) -> None: """A connection has called SUBSCRIBE""" self._subscribers_by_channel[channel].add(conn) - def remove_subscriber(self, conn): + def remove_subscriber(self, conn: "FakeRedisPubSubProtocol") -> None: """A connection has lost connection""" for subscribers in self._subscribers_by_channel.values(): subscribers.discard(conn) - def publish(self, conn, channel: bytes, msg) -> int: + def publish( + self, conn: "FakeRedisPubSubProtocol", channel: bytes, msg: object + ) -> int: """A connection want to publish a message to subscribers.""" for sub in self._subscribers_by_channel[channel]: sub.send(["message", channel, msg]) return len(self._subscribers_by_channel) - def buildProtocol(self, addr): + def buildProtocol(self, addr: IPv4Address) -> "FakeRedisPubSubProtocol": return FakeRedisPubSubProtocol(self) @@ -506,7 +516,7 @@ def __init__(self, server: FakeRedisPubSubServer): self._server = server self._reader = hiredis.Reader() - def dataReceived(self, data): + def dataReceived(self, data: bytes) -> None: self._reader.feed(data) # We might get multiple messages in one packet. @@ -523,7 +533,7 @@ def dataReceived(self, data): self.handle_command(msg[0], *msg[1:]) - def handle_command(self, command, *args): + def handle_command(self, command: bytes, *args: bytes) -> None: """Received a Redis command from the client.""" # We currently only support pub/sub. @@ -548,9 +558,9 @@ def handle_command(self, command, *args): self.send("PONG") else: - raise Exception(f"Unknown command: {command}") + raise Exception(f"Unknown command: {command!r}") - def send(self, msg): + def send(self, msg: object) -> None: """Send a message back to the client.""" assert self.transport is not None @@ -559,7 +569,7 @@ def send(self, msg): self.transport.write(raw) self.transport.flush() - def encode(self, obj): + def encode(self, obj: object) -> str: """Encode an object to its Redis format. Supports: strings/bytes, integers and list/tuples. 
@@ -581,5 +591,5 @@ def encode(self, obj): raise Exception("Unrecognized type for encoding redis: %r: %r", type(obj), obj) - def connectionLost(self, reason): + def connectionLost(self, reason: Failure = connectionDone) -> None: self._server.remove_subscriber(self) diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index e03d9b4cc098..9be11ab80217 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -74,7 +74,7 @@ async def _handle_request( # type: ignore[override] class ReplicationEndpointCancellationTestCase(unittest.HomeserverTestCase): """Tests for `ReplicationEndpoint` cancellation.""" - def create_test_resource(self): + def create_test_resource(self) -> JsonResource: """Overrides `HomeserverTestCase.create_test_resource`.""" resource = JsonResource(self.hs) diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index c5705256e6fa..4c9b494344ae 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -13,35 +13,42 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Any, Iterable, Optional from unittest.mock import Mock -from tests.replication._base import BaseStreamTestCase +from twisted.test.proto_helpers import MemoryReactor +from synapse.server import HomeServer +from synapse.util import Clock -class BaseSlavedStoreTestCase(BaseStreamTestCase): - def make_homeserver(self, reactor, clock): +from tests.replication._base import BaseStreamTestCase - hs = self.setup_test_homeserver(federation_client=Mock()) - return hs +class BaseSlavedStoreTestCase(BaseStreamTestCase): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + return self.setup_test_homeserver(federation_client=Mock()) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) self.reconnect() self.master_store = hs.get_datastores().main self.slaved_store = self.worker_hs.get_datastores().main - self._storage_controllers = hs.get_storage_controllers() + persistence = hs.get_storage_controllers().persistence + assert persistence is not None + self.persistance = persistence - def replicate(self): + def replicate(self) -> None: """Tell the master side of replication that something has happened, and then wait for the replication to occur. """ self.streamer.on_notifier_poke() self.pump(0.1) - def check(self, method, args, expected_result=None): + def check( + self, method: str, args: Iterable[Any], expected_result: Optional[Any] = None + ) -> None: master_result = self.get_success(getattr(self.master_store, method)(*args)) slaved_result = self.get_success(getattr(self.slaved_store, method)(*args)) if expected_result is not None: diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index dce71f73340d..ddca9d696c75 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -12,15 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
import logging -from typing import Iterable, Optional +from typing import Any, Callable, Iterable, List, Optional, Tuple from canonicaljson import encode_canonical_json from parameterized import parameterized +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import ReceiptTypes from synapse.api.room_versions import RoomVersions -from synapse.events import FrozenEvent, _EventInternalMetadata, make_event_from_dict +from synapse.events import EventBase, _EventInternalMetadata, make_event_from_dict +from synapse.events.snapshot import EventContext from synapse.handlers.room import RoomEventSource +from synapse.server import HomeServer from synapse.storage.databases.main.event_push_actions import ( NotifCounts, RoomNotifCounts, @@ -28,6 +32,7 @@ from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.roommember import GetRoomsForUserWithStreamOrdering, RoomsForUser from synapse.types import PersistedEventPosition +from synapse.util import Clock from tests.server import FakeTransport @@ -41,19 +46,19 @@ logger = logging.getLogger(__name__) -def dict_equals(self, other): +def dict_equals(self: EventBase, other: EventBase) -> bool: me = encode_canonical_json(self.get_pdu_json()) them = encode_canonical_json(other.get_pdu_json()) return me == them -def patch__eq__(cls): +def patch__eq__(cls: object) -> Callable[[], None]: eq = getattr(cls, "__eq__", None) - cls.__eq__ = dict_equals + cls.__eq__ = dict_equals # type: ignore[assignment] - def unpatch(): + def unpatch() -> None: if eq is not None: - cls.__eq__ = eq + cls.__eq__ = eq # type: ignore[assignment] return unpatch @@ -62,14 +67,14 @@ class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): STORE_TYPE = EventsWorkerStore - def setUp(self): + def setUp(self) -> None: # Patch up the equality operator for events so that we can check # whether lists of events match using assertEqual - self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(FrozenEvent)] - return super().setUp() + self.unpatches = [patch__eq__(_EventInternalMetadata), patch__eq__(EventBase)] + super().setUp() - def prepare(self, *args, **kwargs): - super().prepare(*args, **kwargs) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) self.get_success( self.master_store.store_room( @@ -80,10 +85,10 @@ def prepare(self, *args, **kwargs): ) ) - def tearDown(self): + def tearDown(self) -> None: [unpatch() for unpatch in self.unpatches] - def test_get_latest_event_ids_in_room(self): + def test_get_latest_event_ids_in_room(self) -> None: create = self.persist(type="m.room.create", key="", creator=USER_ID) self.replicate() self.check("get_latest_event_ids_in_room", (ROOM_ID,), [create.event_id]) @@ -97,7 +102,7 @@ def test_get_latest_event_ids_in_room(self): self.replicate() self.check("get_latest_event_ids_in_room", (ROOM_ID,), [join.event_id]) - def test_redactions(self): + def test_redactions(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) self.persist(type="m.room.member", key=USER_ID, membership="join") @@ -117,7 +122,7 @@ def test_redactions(self): ) self.check("get_event", [msg.event_id], redacted) - def test_backfilled_redactions(self): + def test_backfilled_redactions(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) self.persist(type="m.room.member", key=USER_ID, membership="join") @@ -139,7 +144,7 @@ def test_backfilled_redactions(self): ) self.check("get_event", 
[msg.event_id], redacted) - def test_invites(self): + def test_invites(self) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) self.check("get_invited_rooms_for_local_user", [USER_ID_2], []) event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite") @@ -163,7 +168,7 @@ def test_invites(self): ) @parameterized.expand([(True,), (False,)]) - def test_push_actions_for_user(self, send_receipt: bool): + def test_push_actions_for_user(self, send_receipt: bool) -> None: self.persist(type="m.room.create", key="", creator=USER_ID) self.persist(type="m.room.member", key=USER_ID, membership="join") self.persist( @@ -219,7 +224,7 @@ def test_push_actions_for_user(self, send_receipt: bool): ), ) - def test_get_rooms_for_user_with_stream_ordering(self): + def test_get_rooms_for_user_with_stream_ordering(self) -> None: """Check that the cache on get_rooms_for_user_with_stream_ordering is invalidated by rows in the events stream """ @@ -243,7 +248,9 @@ def test_get_rooms_for_user_with_stream_ordering(self): {GetRoomsForUserWithStreamOrdering(ROOM_ID, expected_pos)}, ) - def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(self): + def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist( + self, + ) -> None: """Check that current_state invalidation happens correctly with multiple events in the persistence batch. @@ -283,11 +290,7 @@ def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(self): type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" ) msg, msgctx = self.build_event() - self.get_success( - self._storage_controllers.persistence.persist_events( - [(j2, j2ctx), (msg, msgctx)] - ) - ) + self.get_success(self.persistance.persist_events([(j2, j2ctx), (msg, msgctx)])) self.replicate() assert j2.internal_metadata.stream_ordering is not None @@ -339,7 +342,7 @@ def test_get_rooms_for_user_with_stream_ordering_with_multi_event_persist(self): event_id = 0 - def persist(self, backfill=False, **kwargs) -> FrozenEvent: + def persist(self, backfill: bool = False, **kwargs: Any) -> EventBase: """ Returns: The event that was persisted. 
@@ -348,32 +351,28 @@ def persist(self, backfill=False, **kwargs) -> FrozenEvent: if backfill: self.get_success( - self._storage_controllers.persistence.persist_events( - [(event, context)], backfilled=True - ) + self.persistance.persist_events([(event, context)], backfilled=True) ) else: - self.get_success( - self._storage_controllers.persistence.persist_event(event, context) - ) + self.get_success(self.persistance.persist_event(event, context)) return event def build_event( self, - sender=USER_ID, - room_id=ROOM_ID, - type="m.room.message", - key=None, + sender: str = USER_ID, + room_id: str = ROOM_ID, + type: str = "m.room.message", + key: Optional[str] = None, internal: Optional[dict] = None, - depth=None, - prev_events: Optional[list] = None, - auth_events: Optional[list] = None, - prev_state: Optional[list] = None, - redacts=None, + depth: Optional[int] = None, + prev_events: Optional[List[Tuple[str, dict]]] = None, + auth_events: Optional[List[str]] = None, + prev_state: Optional[List[str]] = None, + redacts: Optional[str] = None, push_actions: Iterable = frozenset(), - **content, - ): + **content: object, + ) -> Tuple[EventBase, EventContext]: prev_events = prev_events or [] auth_events = auth_events or [] prev_state = prev_state or [] diff --git a/tests/replication/tcp/streams/test_account_data.py b/tests/replication/tcp/streams/test_account_data.py index 50fbff5f324f..01df1be0473c 100644 --- a/tests/replication/tcp/streams/test_account_data.py +++ b/tests/replication/tcp/streams/test_account_data.py @@ -21,7 +21,7 @@ class AccountDataStreamTestCase(BaseStreamTestCase): - def test_update_function_room_account_data_limit(self): + def test_update_function_room_account_data_limit(self) -> None: """Test replication with many room account data updates""" store = self.hs.get_datastores().main @@ -67,7 +67,7 @@ def test_update_function_room_account_data_limit(self): self.assertEqual([], received_rows) - def test_update_function_global_account_data_limit(self): + def test_update_function_global_account_data_limit(self) -> None: """Test replication with many global account data updates""" store = self.hs.get_datastores().main diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 641a94133b1d..043dbe76afb0 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -12,7 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List, Optional +from typing import Any, List, Optional + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, Membership from synapse.events import EventBase @@ -25,6 +27,8 @@ ) from synapse.rest import admin from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests.replication._base import BaseStreamTestCase from tests.test_utils.event_injection import inject_event, inject_member_event @@ -37,7 +41,7 @@ class EventsStreamTestCase(BaseStreamTestCase): room.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) self.user_id = self.register_user("u1", "pass") self.user_tok = self.login("u1", "pass") @@ -47,7 +51,7 @@ def prepare(self, reactor, clock, hs): self.room_id = self.helper.create_room_as(tok=self.user_tok) self.test_handler.received_rdata_rows.clear() - def test_update_function_event_row_limit(self): + def test_update_function_event_row_limit(self) -> None: """Test replication with many non-state events Checks that all events are correctly replicated when there are lots of @@ -102,7 +106,7 @@ def test_update_function_event_row_limit(self): self.assertEqual([], received_rows) - def test_update_function_huge_state_change(self): + def test_update_function_huge_state_change(self) -> None: """Test replication with many state events Ensures that all events are correctly replicated when there are lots of @@ -256,7 +260,7 @@ def test_update_function_huge_state_change(self): # "None" indicates the state has been deleted self.assertIsNone(sr.event_id) - def test_update_function_state_row_limit(self): + def test_update_function_state_row_limit(self) -> None: """Test replication with many state events over several stream ids.""" # we want to generate lots of state changes, but for this test, we want to @@ -376,7 +380,7 @@ def test_update_function_state_row_limit(self): self.assertEqual([], received_rows) - def test_backwards_stream_id(self): + def test_backwards_stream_id(self) -> None: """ Test that RDATA that comes after the current position should be discarded. """ @@ -437,7 +441,7 @@ def test_backwards_stream_id(self): event_count = 0 def _inject_test_event( - self, body: Optional[str] = None, sender: Optional[str] = None, **kwargs + self, body: Optional[str] = None, sender: Optional[str] = None, **kwargs: Any ) -> EventBase: if sender is None: sender = self.user_id diff --git a/tests/replication/tcp/streams/test_federation.py b/tests/replication/tcp/streams/test_federation.py index bcb82c9c8051..cdbdfaf057dc 100644 --- a/tests/replication/tcp/streams/test_federation.py +++ b/tests/replication/tcp/streams/test_federation.py @@ -26,7 +26,7 @@ def _get_worker_hs_config(self) -> dict: config["federation_sender_instances"] = ["federation_sender1"] return config - def test_catchup(self): + def test_catchup(self) -> None: """Basic test of catchup on reconnect Makes sure that updates sent while we are offline are received later. 
diff --git a/tests/replication/tcp/streams/test_partial_state.py b/tests/replication/tcp/streams/test_partial_state.py index 2c10eab4dbcd..38b5020ce0e1 100644 --- a/tests/replication/tcp/streams/test_partial_state.py +++ b/tests/replication/tcp/streams/test_partial_state.py @@ -23,7 +23,7 @@ class PartialStateStreamsTestCase(BaseMultiWorkerStreamTestCase): hijack_auth = True user_id = "@bob:test" - def setUp(self): + def setUp(self) -> None: super().setUp() self.store = self.hs.get_datastores().main diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index 9a229dd23f4a..68de5d1cc280 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -27,10 +27,11 @@ class TypingStreamTestCase(BaseStreamTestCase): - def _build_replication_data_handler(self): - return Mock(wraps=super()._build_replication_data_handler()) + def _build_replication_data_handler(self) -> Mock: + self.mock_handler = Mock(wraps=super()._build_replication_data_handler()) + return self.mock_handler - def test_typing(self): + def test_typing(self) -> None: typing = self.hs.get_typing_handler() self.reconnect() @@ -43,8 +44,8 @@ def test_typing(self): request = self.handle_http_replication_attempt() self.assert_request_is_get_repl_stream_updates(request, "typing") - self.test_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0] + self.mock_handler.on_rdata.assert_called_once() + stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] self.assertEqual(stream_name, "typing") self.assertEqual(1, len(rdata_rows)) row: TypingStream.TypingStreamRow = rdata_rows[0] @@ -54,11 +55,11 @@ def test_typing(self): # Now let's disconnect and insert some data. self.disconnect() - self.test_handler.on_rdata.reset_mock() + self.mock_handler.on_rdata.reset_mock() typing._push_update(member=RoomMember(ROOM_ID, USER_ID), typing=False) - self.test_handler.on_rdata.assert_not_called() + self.mock_handler.on_rdata.assert_not_called() self.reconnect() self.pump(0.1) @@ -71,15 +72,15 @@ def test_typing(self): assert request.args is not None self.assertEqual(int(request.args[b"from_token"][0]), token) - self.test_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0] + self.mock_handler.on_rdata.assert_called_once() + stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] self.assertEqual(stream_name, "typing") self.assertEqual(1, len(rdata_rows)) row = rdata_rows[0] self.assertEqual(ROOM_ID, row.room_id) self.assertEqual([], row.user_ids) - def test_reset(self): + def test_reset(self) -> None: """ Test what happens when a typing stream resets. @@ -98,8 +99,8 @@ def test_reset(self): request = self.handle_http_replication_attempt() self.assert_request_is_get_repl_stream_updates(request, "typing") - self.test_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0] + self.mock_handler.on_rdata.assert_called_once() + stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] self.assertEqual(stream_name, "typing") self.assertEqual(1, len(rdata_rows)) row: TypingStream.TypingStreamRow = rdata_rows[0] @@ -134,15 +135,15 @@ def test_reset(self): self.assert_request_is_get_repl_stream_updates(request, "typing") # Reset the test code. 
- self.test_handler.on_rdata.reset_mock() - self.test_handler.on_rdata.assert_not_called() + self.mock_handler.on_rdata.reset_mock() + self.mock_handler.on_rdata.assert_not_called() # Push additional data. typing._push_update(member=RoomMember(ROOM_ID_2, USER_ID_2), typing=False) self.reactor.advance(0) - self.test_handler.on_rdata.assert_called_once() - stream_name, _, token, rdata_rows = self.test_handler.on_rdata.call_args[0] + self.mock_handler.on_rdata.assert_called_once() + stream_name, _, token, rdata_rows = self.mock_handler.on_rdata.call_args[0] self.assertEqual(stream_name, "typing") self.assertEqual(1, len(rdata_rows)) row = rdata_rows[0] diff --git a/tests/replication/tcp/test_commands.py b/tests/replication/tcp/test_commands.py index cca7ebb7195d..5d6b72b16d84 100644 --- a/tests/replication/tcp/test_commands.py +++ b/tests/replication/tcp/test_commands.py @@ -21,12 +21,12 @@ class ParseCommandTestCase(TestCase): - def test_parse_one_word_command(self): + def test_parse_one_word_command(self) -> None: line = "REPLICATE" cmd = parse_command_from_line(line) self.assertIsInstance(cmd, ReplicateCommand) - def test_parse_rdata(self): + def test_parse_rdata(self) -> None: line = 'RDATA events master 6287863 ["ev", ["$eventid", "!roomid", "type", null, null, null]]' cmd = parse_command_from_line(line) assert isinstance(cmd, RdataCommand) @@ -34,7 +34,7 @@ def test_parse_rdata(self): self.assertEqual(cmd.instance_name, "master") self.assertEqual(cmd.token, 6287863) - def test_parse_rdata_batch(self): + def test_parse_rdata_batch(self) -> None: line = 'RDATA presence master batch ["@foo:example.com", "online"]' cmd = parse_command_from_line(line) assert isinstance(cmd, RdataCommand) diff --git a/tests/replication/tcp/test_remote_server_up.py b/tests/replication/tcp/test_remote_server_up.py index 545f11acd1bf..b75fc05fd55b 100644 --- a/tests/replication/tcp/test_remote_server_up.py +++ b/tests/replication/tcp/test_remote_server_up.py @@ -16,15 +16,17 @@ from twisted.internet.address import IPv4Address from twisted.internet.interfaces import IProtocol -from twisted.test.proto_helpers import StringTransport +from twisted.test.proto_helpers import MemoryReactor, StringTransport from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory +from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase class RemoteServerUpTestCase(HomeserverTestCase): - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.factory = ReplicationStreamProtocolFactory(hs) def _make_client(self) -> Tuple[IProtocol, StringTransport]: @@ -40,7 +42,7 @@ def _make_client(self) -> Tuple[IProtocol, StringTransport]: return proto, transport - def test_relay(self): + def test_relay(self) -> None: """Test that Synapse will relay REMOTE_SERVER_UP commands to all other connections, but not the one that sent it. """ diff --git a/tests/replication/test_auth.py b/tests/replication/test_auth.py index 5d7a89e0c702..98602371e467 100644 --- a/tests/replication/test_auth.py +++ b/tests/replication/test_auth.py @@ -13,7 +13,11 @@ # limitations under the License. 
import logging +from twisted.test.proto_helpers import MemoryReactor + from synapse.rest.client import register +from synapse.server import HomeServer +from synapse.util import Clock from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeChannel, make_request @@ -27,7 +31,7 @@ class WorkerAuthenticationTestCase(BaseMultiWorkerStreamTestCase): servlets = [register.register_servlets] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() # This isn't a real configuration option but is used to provide the main # homeserver and worker homeserver different options. @@ -77,7 +81,7 @@ def _test_register(self) -> FakeChannel: {"auth": {"session": session, "type": "m.login.dummy"}}, ) - def test_no_auth(self): + def test_no_auth(self) -> None: """With no authentication the request should finish.""" channel = self._test_register() self.assertEqual(channel.code, 200) @@ -86,7 +90,7 @@ def test_no_auth(self): self.assertEqual(channel.json_body["user_id"], "@user:test") @override_config({"main_replication_secret": "my-secret"}) - def test_missing_auth(self): + def test_missing_auth(self) -> None: """If the main process expects a secret that is not provided, an error results.""" channel = self._test_register() self.assertEqual(channel.code, 500) @@ -97,13 +101,13 @@ def test_missing_auth(self): "worker_replication_secret": "wrong-secret", } ) - def test_unauthorized(self): + def test_unauthorized(self) -> None: """If the main process receives the wrong secret, an error results.""" channel = self._test_register() self.assertEqual(channel.code, 500) @override_config({"worker_replication_secret": "my-secret"}) - def test_authorized(self): + def test_authorized(self) -> None: """The request should finish when the worker provides the authentication header.""" channel = self._test_register() self.assertEqual(channel.code, 200) diff --git a/tests/replication/test_client_reader_shard.py b/tests/replication/test_client_reader_shard.py index eb5b376534b9..eca503376110 100644 --- a/tests/replication/test_client_reader_shard.py +++ b/tests/replication/test_client_reader_shard.py @@ -33,7 +33,7 @@ def _get_worker_hs_config(self) -> dict: config["worker_replication_http_port"] = "8765" return config - def test_register_single_worker(self): + def test_register_single_worker(self) -> None: """Test that registration works when using a single generic worker.""" worker_hs = self.make_worker_hs("synapse.app.generic_worker") site = self._hs_to_site[worker_hs] @@ -63,7 +63,7 @@ def test_register_single_worker(self): # We're given a registered user. 
self.assertEqual(channel_2.json_body["user_id"], "@user:test") - def test_register_multi_worker(self): + def test_register_multi_worker(self) -> None: """Test that registration works when using multiple generic workers.""" worker_hs_1 = self.make_worker_hs("synapse.app.generic_worker") worker_hs_2 = self.make_worker_hs("synapse.app.generic_worker") diff --git a/tests/replication/test_federation_ack.py b/tests/replication/test_federation_ack.py index 63b1dd40b5b9..12668b34c5aa 100644 --- a/tests/replication/test_federation_ack.py +++ b/tests/replication/test_federation_ack.py @@ -14,10 +14,14 @@ from unittest import mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.app.generic_worker import GenericWorkerServer from synapse.replication.tcp.commands import FederationAckCommand from synapse.replication.tcp.protocol import IReplicationConnection from synapse.replication.tcp.streams.federation import FederationStream +from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -30,12 +34,10 @@ def default_config(self) -> dict: config["federation_sender_instances"] = ["federation_sender1"] return config - def make_homeserver(self, reactor, clock): - hs = self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) - - return hs + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + return self.setup_test_homeserver(homeserver_to_use=GenericWorkerServer) - def test_federation_ack_sent(self): + def test_federation_ack_sent(self) -> None: """A FEDERATION_ACK should be sent back after each RDATA federation This test checks that the federation sender is correctly sending back diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index c28073b8f797..89380e25b59a 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -40,7 +40,7 @@ class FederationSenderTestCase(BaseMultiWorkerStreamTestCase): room.register_servlets, ] - def test_send_event_single_sender(self): + def test_send_event_single_sender(self) -> None: """Test that using a single federation sender worker correctly sends a new event. """ @@ -71,7 +71,7 @@ def test_send_event_single_sender(self): self.assertEqual(mock_client.put_json.call_args[0][0], "other_server") self.assertTrue(mock_client.put_json.call_args[1]["data"].get("pdus")) - def test_send_event_sharded(self): + def test_send_event_sharded(self) -> None: """Test that using two federation sender workers correctly sends new events. """ @@ -138,7 +138,7 @@ def test_send_event_sharded(self): self.assertTrue(sent_on_1) self.assertTrue(sent_on_2) - def test_send_typing_sharded(self): + def test_send_typing_sharded(self) -> None: """Test that using two federation sender workers correctly sends new typing EDUs. 
""" @@ -215,7 +215,9 @@ def test_send_typing_sharded(self): self.assertTrue(sent_on_1) self.assertTrue(sent_on_2) - def create_room_with_remote_server(self, user, token, remote_server="other_server"): + def create_room_with_remote_server( + self, user: str, token: str, remote_server: str = "other_server" + ) -> str: room = self.helper.create_room_as(user, tok=token) store = self.hs.get_datastores().main federation = self.hs.get_federation_event_handler() diff --git a/tests/replication/test_module_cache_invalidation.py b/tests/replication/test_module_cache_invalidation.py index b93cae67d3c4..9c4fbda71b87 100644 --- a/tests/replication/test_module_cache_invalidation.py +++ b/tests/replication/test_module_cache_invalidation.py @@ -39,7 +39,7 @@ class ModuleCacheInvalidationTestCase(BaseMultiWorkerStreamTestCase): synapse.rest.admin.register_servlets, ] - def test_module_cache_full_invalidation(self): + def test_module_cache_full_invalidation(self) -> None: main_cache = TestCache() self.hs.get_module_api().register_cached_function(main_cache.cached_function) diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 96cdf2c45b16..1527b4a82d84 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -18,12 +18,14 @@ from twisted.internet.interfaces import IOpenSSLServerConnectionCreator from twisted.internet.protocol import Factory from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol +from twisted.test.proto_helpers import MemoryReactor from twisted.web.http import HTTPChannel from twisted.web.server import Request from synapse.rest import admin from synapse.rest.client import login from synapse.server import HomeServer +from synapse.util import Clock from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -43,13 +45,13 @@ class MediaRepoShardTestCase(BaseMultiWorkerStreamTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("user", "pass") self.access_token = self.login("user", "pass") self.reactor.lookups["example.com"] = "1.2.3.4" - def default_config(self): + def default_config(self) -> dict: conf = super().default_config() conf["federation_custom_ca_list"] = [get_test_ca_cert_file()] return conf @@ -122,7 +124,7 @@ def _get_media_req( return channel, request - def test_basic(self): + def test_basic(self) -> None: """Test basic fetching of remote media from a single worker.""" hs1 = self.make_worker_hs("synapse.app.generic_worker") @@ -138,7 +140,7 @@ def test_basic(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.result["body"], b"Hello!") - def test_download_simple_file_race(self): + def test_download_simple_file_race(self) -> None: """Test that fetching remote media from two different processes at the same time works. """ @@ -177,7 +179,7 @@ def test_download_simple_file_race(self): # We expect only one new file to have been persisted. self.assertEqual(start_count + 1, self._count_remote_media()) - def test_download_image_race(self): + def test_download_image_race(self) -> None: """Test that fetching remote *images* from two different processes at the same time works. 
@@ -229,7 +231,7 @@ def _count_remote_thumbnails(self) -> int: return sum(len(files) for _, _, files in os.walk(path)) -def get_connection_factory(): +def get_connection_factory() -> TestServerTLSConnectionFactory: # this needs to happen once, but not until we are ready to run the first test global test_server_connection_factory if test_server_connection_factory is None: @@ -263,6 +265,6 @@ def _build_test_server( return server_tls_factory.buildProtocol(None) -def _log_request(request): +def _log_request(request: Request) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info("Completed request %s", request) diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py index ca18ad655394..9345cfbeb266 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -15,9 +15,12 @@ from unittest.mock import Mock from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.rest import admin from synapse.rest.client import login, room +from synapse.server import HomeServer +from synapse.util import Clock from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -33,12 +36,12 @@ class PusherShardTestCase(BaseMultiWorkerStreamTestCase): login.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Register a user who sends a message that we'll get notified about self.other_user_id = self.register_user("otheruser", "pass") self.other_access_token = self.login("otheruser", "pass") - def _create_pusher_and_send_msg(self, localpart): + def _create_pusher_and_send_msg(self, localpart: str) -> str: # Create a user that will get push notifications user_id = self.register_user(localpart, "pass") access_token = self.login(localpart, "pass") @@ -79,7 +82,7 @@ def _create_pusher_and_send_msg(self, localpart): return event_id - def test_send_push_single_worker(self): + def test_send_push_single_worker(self) -> None: """Test that registration works when using a pusher worker.""" http_client_mock = Mock(spec_set=["post_json_get_json"]) http_client_mock.post_json_get_json.side_effect = ( @@ -109,7 +112,7 @@ def test_send_push_single_worker(self): ], ) - def test_send_push_multiple_workers(self): + def test_send_push_multiple_workers(self) -> None: """Test that registration works when using sharded pusher workers.""" http_client_mock1 = Mock(spec_set=["post_json_get_json"]) http_client_mock1.post_json_get_json.side_effect = ( diff --git a/tests/replication/test_sharded_event_persister.py b/tests/replication/test_sharded_event_persister.py index 541d3902860c..7f9cc67e735a 100644 --- a/tests/replication/test_sharded_event_persister.py +++ b/tests/replication/test_sharded_event_persister.py @@ -14,9 +14,13 @@ import logging from unittest.mock import patch +from twisted.test.proto_helpers import MemoryReactor + from synapse.rest import admin from synapse.rest.client import login, room, sync +from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator +from synapse.util import Clock from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import make_request @@ -34,7 +38,7 @@ class EventPersisterShardTestCase(BaseMultiWorkerStreamTestCase): sync.register_servlets, ] - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Register a user 
who sends a message that we'll get notified about self.other_user_id = self.register_user("otheruser", "pass") self.other_access_token = self.login("otheruser", "pass") @@ -42,7 +46,7 @@ def prepare(self, reactor, clock, hs): self.room_creator = self.hs.get_room_creation_handler() self.store = hs.get_datastores().main - def default_config(self): + def default_config(self) -> dict: conf = super().default_config() conf["stream_writers"] = {"events": ["worker1", "worker2"]} conf["instance_map"] = { @@ -51,7 +55,7 @@ def default_config(self): } return conf - def _create_room(self, room_id: str, user_id: str, tok: str): + def _create_room(self, room_id: str, user_id: str, tok: str) -> None: """Create a room with given room_id""" # We control the room ID generation by patching out the @@ -62,7 +66,7 @@ def _create_room(self, room_id: str, user_id: str, tok: str): mock.side_effect = lambda: room_id self.helper.create_room_as(user_id, tok=tok) - def test_basic(self): + def test_basic(self) -> None: """Simple test to ensure that multiple rooms can be created and joined, and that different rooms get handled by different instances. """ @@ -112,7 +116,7 @@ def test_basic(self): self.assertTrue(persisted_on_1) self.assertTrue(persisted_on_2) - def test_vector_clock_token(self): + def test_vector_clock_token(self) -> None: """Tests that using a stream token with a vector clock component works correctly with basic /sync and /messages usage. """ From 0f34abed7cb83c14bb100ef0ac37188acd5810c3 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 6 Feb 2023 16:05:06 +0000 Subject: [PATCH 130/344] Type hints for tests.federation (#14991) * Make tests.federation pass mypy * Untyped defs in tests.federation.transport * test methods return None * Remaining type hints in tests.federation * Changelog * Avoid an uncessary type-ignore --- changelog.d/14991.misc | 1 + mypy.ini | 4 +- tests/federation/test_complexity.py | 18 ++--- tests/federation/test_federation_catch_up.py | 52 +++++++------ tests/federation/test_federation_client.py | 12 +-- tests/federation/test_federation_sender.py | 74 ++++++++++--------- tests/federation/test_federation_server.py | 18 ++--- .../federation/transport/server/test__base.py | 4 +- tests/federation/transport/test_knocking.py | 32 +++++--- tests/federation/transport/test_server.py | 6 +- 10 files changed, 127 insertions(+), 94 deletions(-) create mode 100644 changelog.d/14991.misc diff --git a/changelog.d/14991.misc b/changelog.d/14991.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14991.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index 22f091145475..2f8806e1f45c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,8 +32,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/schema/ - |tests/federation/test_federation_catch_up.py - |tests/federation/test_federation_sender.py |tests/http/federation/test_matrix_federation_agent.py |tests/http/federation/test_srv_resolver.py |tests/http/test_proxyagent.py @@ -89,7 +87,7 @@ disallow_untyped_defs = True [mypy-tests.events.*] disallow_untyped_defs = True -[mypy-tests.federation.transport.test_client] +[mypy-tests.federation.*] disallow_untyped_defs = True [mypy-tests.handlers.*] diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 9f1115dd23b8..d667dd27bf40 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -17,7 +17,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.rest import admin from synapse.rest.client import login, room -from synapse.types import UserID +from synapse.types import JsonDict, UserID from tests import unittest from tests.test_utils import make_awaitable @@ -31,12 +31,12 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): login.register_servlets, ] - def default_config(self): + def default_config(self) -> JsonDict: config = super().default_config() config["limit_remote_rooms"] = {"enabled": True, "complexity": 0.05} return config - def test_complexity_simple(self): + def test_complexity_simple(self) -> None: u1 = self.register_user("u1", "pass") u1_token = self.login("u1", "pass") @@ -66,7 +66,7 @@ def test_complexity_simple(self): complexity = channel.json_body["v1"] self.assertEqual(complexity, 1.23) - def test_join_too_large(self): + def test_join_too_large(self) -> None: u1 = self.register_user("u1", "pass") @@ -95,7 +95,7 @@ def test_join_too_large(self): self.assertEqual(f.value.code, 400, f.value) self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - def test_join_too_large_admin(self): + def test_join_too_large_admin(self) -> None: # Check whether an admin can join if option "admins_can_join" is undefined, # this option defaults to false, so the join should fail. @@ -126,7 +126,7 @@ def test_join_too_large_admin(self): self.assertEqual(f.value.code, 400, f.value) self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - def test_join_too_large_once_joined(self): + def test_join_too_large_once_joined(self) -> None: u1 = self.register_user("u1", "pass") u1_token = self.login("u1", "pass") @@ -180,7 +180,7 @@ class RoomComplexityAdminTests(unittest.FederatingHomeserverTestCase): login.register_servlets, ] - def default_config(self): + def default_config(self) -> JsonDict: config = super().default_config() config["limit_remote_rooms"] = { "enabled": True, @@ -189,7 +189,7 @@ def default_config(self): } return config - def test_join_too_large_no_admin(self): + def test_join_too_large_no_admin(self) -> None: # A user which is not an admin should not be able to join a remote room # which is too complex. @@ -220,7 +220,7 @@ def test_join_too_large_no_admin(self): self.assertEqual(f.value.code, 400, f.value) self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - def test_join_too_large_admin(self): + def test_join_too_large_admin(self) -> None: # An admin should be able to join rooms where a complexity check fails. 
u1 = self.register_user("u1", "pass", admin=True) diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index b8fee7289838..a986b15f0a87 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,13 +1,17 @@ -from typing import List, Tuple +from typing import Callable, List, Optional, Tuple from unittest.mock import Mock +from twisted.test.proto_helpers import MemoryReactor + from synapse.api.constants import EventTypes from synapse.events import EventBase from synapse.federation.sender import PerDestinationQueue, TransactionManager -from synapse.federation.units import Edu +from synapse.federation.units import Edu, Transaction from synapse.rest import admin from synapse.rest.client import login, room +from synapse.server import HomeServer from synapse.types import JsonDict +from synapse.util import Clock from synapse.util.retryutils import NotRetryingDestination from tests.test_utils import event_injection, make_awaitable @@ -28,23 +32,25 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver( federation_transport_client=Mock(spec=["send_transaction"]), ) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # stub out get_current_hosts_in_room - state_handler = hs.get_state_handler() + state_storage_controller = hs.get_storage_controllers().state # This mock is crucial for destination_rooms to be populated. - state_handler.get_current_hosts_in_room = Mock( - return_value=make_awaitable(["test", "host2"]) + # TODO: this seems to no longer be the case---tests pass with this mock + # commented out. + state_storage_controller.get_current_hosts_in_room = Mock( # type: ignore[assignment] + return_value=make_awaitable({"test", "host2"}) ) # whenever send_transaction is called, record the pdu data - self.pdus = [] - self.failed_pdus = [] + self.pdus: List[JsonDict] = [] + self.failed_pdus: List[JsonDict] = [] self.is_online = True self.hs.get_federation_transport_client().send_transaction.side_effect = ( self.record_transaction @@ -55,8 +61,13 @@ def default_config(self) -> JsonDict: config["federation_sender_instances"] = None return config - async def record_transaction(self, txn, json_cb): - if self.is_online: + async def record_transaction( + self, txn: Transaction, json_cb: Optional[Callable[[], JsonDict]] + ) -> JsonDict: + if json_cb is None: + # The tests seem to expect that this method raises in this situation. + raise Exception("Blank json_cb") + elif self.is_online: data = json_cb() self.pdus.extend(data["pdus"]) return {} @@ -92,7 +103,7 @@ def get_destination_room(self, room: str, destination: str = "host2") -> dict: )[0] return {"event_id": event_id, "stream_ordering": stream_ordering} - def test_catch_up_destination_rooms_tracking(self): + def test_catch_up_destination_rooms_tracking(self) -> None: """ Tests that we populate the `destination_rooms` table as needed. 
""" @@ -117,7 +128,7 @@ def test_catch_up_destination_rooms_tracking(self): self.assertEqual(row_2["event_id"], event_id_2) self.assertEqual(row_1["stream_ordering"], row_2["stream_ordering"] - 1) - def test_catch_up_last_successful_stream_ordering_tracking(self): + def test_catch_up_last_successful_stream_ordering_tracking(self) -> None: """ Tests that we populate the `destination_rooms` table as needed. """ @@ -174,7 +185,7 @@ def test_catch_up_last_successful_stream_ordering_tracking(self): "Send succeeded but not marked as last_successful_stream_ordering", ) - def test_catch_up_from_blank_state(self): + def test_catch_up_from_blank_state(self) -> None: """ Runs an overall test of federation catch-up from scratch. Further tests will focus on more narrow aspects and edge-cases, but I @@ -261,16 +272,15 @@ async def fake_send( destination_tm: str, pending_pdus: List[EventBase], _pending_edus: List[Edu], - ) -> bool: + ) -> None: assert destination == destination_tm results_list.extend(pending_pdus) - return True # success! - transaction_manager.send_new_transaction = fake_send + transaction_manager.send_new_transaction = fake_send # type: ignore[assignment] return per_dest_queue, results_list - def test_catch_up_loop(self): + def test_catch_up_loop(self) -> None: """ Tests the behaviour of _catch_up_transmission_loop. """ @@ -334,7 +344,7 @@ def test_catch_up_loop(self): event_5.internal_metadata.stream_ordering, ) - def test_catch_up_on_synapse_startup(self): + def test_catch_up_on_synapse_startup(self) -> None: """ Tests the behaviour of get_catch_up_outstanding_destinations and _wake_destinations_needing_catchup. @@ -412,7 +422,7 @@ def test_catch_up_on_synapse_startup(self): # patch wake_destination to just count the destinations instead woken = [] - def wake_destination_track(destination): + def wake_destination_track(destination: str) -> None: woken.append(destination) self.hs.get_federation_sender().wake_destination = wake_destination_track @@ -432,7 +442,7 @@ def wake_destination_track(destination): # - all destinations are woken exactly once; they appear once in woken. self.assertCountEqual(woken, server_names[:-1]) - def test_not_latest_event(self): + def test_not_latest_event(self) -> None: """Test that we send the latest event in the room even if its not ours.""" per_dest_queue, sent_pdus = self.make_fake_destination_queue() diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index e67f4058260f..86e1236501fd 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -36,7 +36,9 @@ class FederationClientTest(FederatingHomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: super().prepare(reactor, clock, homeserver) # mock out the Agent used by the federation client, which is easier than @@ -51,7 +53,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer): self.creator = f"@creator:{self.OTHER_SERVER_NAME}" self.test_room_id = "!room_id" - def test_get_room_state(self): + def test_get_room_state(self) -> None: # mock up some events to use in the response. # In real life, these would have things in `prev_events` and `auth_events`, but that's # a bit annoying to mock up, and the code under test doesn't care, so we don't bother. 
@@ -140,7 +142,7 @@ def test_get_room_state(self): ["m.room.create", "m.room.member", "m.room.power_levels"], ) - def test_get_pdu_returns_nothing_when_event_does_not_exist(self): + def test_get_pdu_returns_nothing_when_event_does_not_exist(self) -> None: """No event should be returned when the event does not exist""" pulled_pdu_info = self.get_success( self.hs.get_federation_client().get_pdu( @@ -151,11 +153,11 @@ def test_get_pdu_returns_nothing_when_event_does_not_exist(self): ) self.assertEqual(pulled_pdu_info, None) - def test_get_pdu(self): + def test_get_pdu(self) -> None: """Test to make sure an event is returned by `get_pdu()`""" self._get_pdu_once() - def test_get_pdu_event_from_cache_is_pristine(self): + def test_get_pdu_event_from_cache_is_pristine(self) -> None: """Test that modifications made to events returned by `get_pdu()` do not propagate back to to the internal cache (events returned should be a copy). diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 8692d8190f66..ddeffe1ad53b 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -11,18 +11,22 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional +from typing import Callable, FrozenSet, List, Optional, Set from unittest.mock import Mock from signedjson import key, sign from signedjson.types import BaseKey, SigningKey from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EduTypes, RoomEncryptionAlgorithms +from synapse.federation.units import Transaction from synapse.rest import admin from synapse.rest.client import login +from synapse.server import HomeServer from synapse.types import JsonDict, ReadReceipt +from synapse.util import Clock from tests.test_utils import make_awaitable from tests.unittest import HomeserverTestCase @@ -36,16 +40,16 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): re-enabled for the main process. 
""" - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver( federation_transport_client=Mock(spec=["send_transaction"]), ) - hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( + hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( # type: ignore[assignment] return_value=make_awaitable({"test", "host2"}) ) - hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = ( + hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = ( # type: ignore[assignment] hs.get_storage_controllers().state.get_current_hosts_in_room ) @@ -56,7 +60,7 @@ def default_config(self) -> JsonDict: config["federation_sender_instances"] = None return config - def test_send_receipts(self): + def test_send_receipts(self) -> None: mock_send_transaction = ( self.hs.get_federation_transport_client().send_transaction ) @@ -98,7 +102,7 @@ def test_send_receipts(self): ], ) - def test_send_receipts_thread(self): + def test_send_receipts_thread(self) -> None: mock_send_transaction = ( self.hs.get_federation_transport_client().send_transaction ) @@ -174,7 +178,7 @@ def test_send_receipts_thread(self): ], ) - def test_send_receipts_with_backoff(self): + def test_send_receipts_with_backoff(self) -> None: """Send two receipts in quick succession; the second should be flushed, but only after 20ms""" mock_send_transaction = ( @@ -272,51 +276,55 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): login.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver( federation_transport_client=Mock( spec=["send_transaction", "query_user_devices"] ), ) - def default_config(self): + def default_config(self) -> JsonDict: c = super().default_config() # Enable federation sending on the main process. c["federation_sender_instances"] = None return c - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: test_room_id = "!room:host1" # stub out `get_rooms_for_user` and `get_current_hosts_in_room` so that the # server thinks the user shares a room with `@user2:host2` - def get_rooms_for_user(user_id): - return defer.succeed({test_room_id}) + def get_rooms_for_user(user_id: str) -> "defer.Deferred[FrozenSet[str]]": + return defer.succeed(frozenset({test_room_id})) - hs.get_datastores().main.get_rooms_for_user = get_rooms_for_user + hs.get_datastores().main.get_rooms_for_user = get_rooms_for_user # type: ignore[assignment] - async def get_current_hosts_in_room(room_id): + async def get_current_hosts_in_room(room_id: str) -> Set[str]: if room_id == test_room_id: - return ["host2"] - - # TODO: We should fail the test when we encounter an unxpected room ID. - # We can't just use `self.fail(...)` here because the app code is greedy - # with `Exception` and will catch it before the test can see it. + return {"host2"} + else: + # TODO: We should fail the test when we encounter an unxpected room ID. + # We can't just use `self.fail(...)` here because the app code is greedy + # with `Exception` and will catch it before the test can see it. 
+ return set() - hs.get_datastores().main.get_current_hosts_in_room = get_current_hosts_in_room + hs.get_datastores().main.get_current_hosts_in_room = get_current_hosts_in_room # type: ignore[assignment] # whenever send_transaction is called, record the edu data - self.edus = [] + self.edus: List[JsonDict] = [] self.hs.get_federation_transport_client().send_transaction.side_effect = ( self.record_transaction ) - def record_transaction(self, txn, json_cb): + def record_transaction( + self, txn: Transaction, json_cb: Optional[Callable[[], JsonDict]] = None + ) -> "defer.Deferred[JsonDict]": + assert json_cb is not None data = json_cb() self.edus.extend(data["edus"]) return defer.succeed({}) - def test_send_device_updates(self): + def test_send_device_updates(self) -> None: """Basic case: each device update should result in an EDU""" # create a device u1 = self.register_user("user", "pass") @@ -340,7 +348,7 @@ def test_send_device_updates(self): self.assertEqual(len(self.edus), 1) self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id) - def test_dont_send_device_updates_for_remote_users(self): + def test_dont_send_device_updates_for_remote_users(self) -> None: """Check that we don't send device updates for remote users""" # Send the server a device list EDU for the other user, this will cause @@ -379,7 +387,7 @@ def test_dont_send_device_updates_for_remote_users(self): ) self.assertIn("D1", devices) - def test_upload_signatures(self): + def test_upload_signatures(self) -> None: """Uploading signatures on some devices should produce updates for that user""" e2e_handler = self.hs.get_e2e_keys_handler() @@ -391,7 +399,7 @@ def test_upload_signatures(self): # expect two edus self.assertEqual(len(self.edus), 2) - stream_id = None + stream_id: Optional[int] = None stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D1", stream_id) stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D2", stream_id) @@ -473,13 +481,13 @@ def test_upload_signatures(self): self.assertEqual(edu["edu_type"], EduTypes.DEVICE_LIST_UPDATE) c = edu["content"] if stream_id is not None: - self.assertEqual(c["prev_id"], [stream_id]) + self.assertEqual(c["prev_id"], [stream_id]) # type: ignore[unreachable] self.assertGreaterEqual(c["stream_id"], stream_id) stream_id = c["stream_id"] devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2"}, devices) - def test_delete_devices(self): + def test_delete_devices(self) -> None: """If devices are deleted, that should result in EDUs too""" # create devices @@ -521,7 +529,7 @@ def test_delete_devices(self): devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2", "D3"}, devices) - def test_unreachable_server(self): + def test_unreachable_server(self) -> None: """If the destination server is unreachable, all the updates should get sent on recovery """ @@ -555,7 +563,7 @@ def test_unreachable_server(self): # for each device, there should be a single update self.assertEqual(len(self.edus), 3) - stream_id = None + stream_id: Optional[int] = None for edu in self.edus: self.assertEqual(edu["edu_type"], EduTypes.DEVICE_LIST_UPDATE) c = edu["content"] @@ -566,7 +574,7 @@ def test_unreachable_server(self): devices = {edu["content"]["device_id"] for edu in self.edus} self.assertEqual({"D1", "D2", "D3"}, devices) - def test_prune_outbound_device_pokes1(self): + def test_prune_outbound_device_pokes1(self) -> None: """If a destination is unreachable, and the updates are pruned, we should get a 
single update. @@ -615,7 +623,7 @@ def test_prune_outbound_device_pokes1(self): # synapse uses an empty prev_id list to indicate "needs a full resync". self.assertEqual(c["prev_id"], []) - def test_prune_outbound_device_pokes2(self): + def test_prune_outbound_device_pokes2(self) -> None: """If a destination is unreachable, and the updates are pruned, we should get a single update. @@ -741,7 +749,7 @@ def encode_pubkey(sk: SigningKey) -> str: return key.encode_verify_key_base64(key.get_verify_key(sk)) -def build_device_dict(user_id: str, device_id: str, sk: SigningKey): +def build_device_dict(user_id: str, device_id: str, sk: SigningKey) -> JsonDict: """Build a dict representing the given device""" return { "user_id": user_id, diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index be719e49c0ca..bba6469b559b 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -21,7 +21,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.config.server import DEFAULT_ROOM_VERSION -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.federation.federation_server import server_matches_acl_event from synapse.rest import admin from synapse.rest.client import login, room @@ -42,7 +42,7 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): ] @parameterized.expand([(b"",), (b"foo",), (b'{"limit": Infinity}',)]) - def test_bad_request(self, query_content): + def test_bad_request(self, query_content: bytes) -> None: """ Querying with bad data returns a reasonable error code. """ @@ -64,7 +64,7 @@ def test_bad_request(self, query_content): class ServerACLsTestCase(unittest.TestCase): - def test_blacklisted_server(self): + def test_blacklisted_server(self) -> None: e = _create_acl_event({"allow": ["*"], "deny": ["evil.com"]}) logging.info("ACL event: %s", e.content) @@ -74,7 +74,7 @@ def test_blacklisted_server(self): self.assertTrue(server_matches_acl_event("evil.com.au", e)) self.assertTrue(server_matches_acl_event("honestly.not.evil.com", e)) - def test_block_ip_literals(self): + def test_block_ip_literals(self) -> None: e = _create_acl_event({"allow_ip_literals": False, "allow": ["*"]}) logging.info("ACL event: %s", e.content) @@ -83,7 +83,7 @@ def test_block_ip_literals(self): self.assertFalse(server_matches_acl_event("[1:2::]", e)) self.assertTrue(server_matches_acl_event("1:2:3:4", e)) - def test_wildcard_matching(self): + def test_wildcard_matching(self) -> None: e = _create_acl_event({"allow": ["good*.com"]}) self.assertTrue( server_matches_acl_event("good.com", e), @@ -110,7 +110,7 @@ class StateQueryTests(unittest.FederatingHomeserverTestCase): login.register_servlets, ] - def test_needs_to_be_in_room(self): + def test_needs_to_be_in_room(self) -> None: """/v1/state/ requires the server to be in the room""" u1 = self.register_user("u1", "pass") u1_token = self.login("u1", "pass") @@ -131,7 +131,7 @@ class SendJoinFederationTests(unittest.FederatingHomeserverTestCase): login.register_servlets, ] - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: super().prepare(reactor, clock, hs) self._storage_controllers = hs.get_storage_controllers() @@ -157,7 +157,7 @@ def _make_join(self, user_id: str) -> JsonDict: self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) return channel.json_body - 
def test_send_join(self): + def test_send_join(self) -> None: """happy-path test of send_join""" joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME join_result = self._make_join(joining_user) @@ -324,7 +324,7 @@ def test_send_join_contributes_to_room_join_rate_limit_and_is_limited(self) -> N # is probably sufficient to reassure that the bucket is updated. -def _create_acl_event(content): +def _create_acl_event(content: JsonDict) -> EventBase: return make_event_from_dict( { "room_id": "!a:b", diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index e88e5d8bb355..55655de8628e 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -15,6 +15,8 @@ from http import HTTPStatus from typing import Dict, List, Tuple +from twisted.web.resource import Resource + from synapse.api.errors import Codes from synapse.federation.transport.server import BaseFederationServlet from synapse.federation.transport.server._base import Authenticator, _parse_auth_header @@ -62,7 +64,7 @@ class BaseFederationServletCancellationTests(unittest.FederatingHomeserverTestCa path = f"{CancellableFederationServlet.PREFIX}{CancellableFederationServlet.PATH}" - def create_test_resource(self): + def create_test_resource(self) -> Resource: """Overrides `HomeserverTestCase.create_test_resource`.""" resource = JsonResource(self.hs) diff --git a/tests/federation/transport/test_knocking.py b/tests/federation/transport/test_knocking.py index ff589c0b6ca0..70209ab09011 100644 --- a/tests/federation/transport/test_knocking.py +++ b/tests/federation/transport/test_knocking.py @@ -12,15 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict -from typing import Dict, List +from typing import Any, Dict, List, Optional + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EventTypes, JoinRules, Membership -from synapse.api.room_versions import RoomVersions -from synapse.events import builder +from synapse.api.room_versions import RoomVersion, RoomVersions +from synapse.events import EventBase, builder +from synapse.events.snapshot import EventContext from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.types import RoomAlias +from synapse.util import Clock from tests.test_utils import event_injection from tests.unittest import FederatingHomeserverTestCase, HomeserverTestCase @@ -197,7 +201,9 @@ class FederationKnockingTestCase( login.register_servlets, ] - def prepare(self, reactor, clock, homeserver): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: self.store = homeserver.get_datastores().main # We're not going to be properly signing events as our remote homeserver is fake, @@ -205,23 +211,29 @@ def prepare(self, reactor, clock, homeserver): # Note that these checks are not relevant to this test case. # Have this homeserver auto-approve all event signature checking. 
- async def approve_all_signature_checking(_, pdu): + async def approve_all_signature_checking( + room_version: RoomVersion, + pdu: EventBase, + record_failure_callback: Any = None, + ) -> EventBase: return pdu - homeserver.get_federation_server()._check_sigs_and_hash = ( + homeserver.get_federation_server()._check_sigs_and_hash = ( # type: ignore[assignment] approve_all_signature_checking ) # Have this homeserver skip event auth checks. This is necessary due to # event auth checks ensuring that events were signed by the sender's homeserver. - async def _check_event_auth(origin, event, context, *args, **kwargs): - return context + async def _check_event_auth( + origin: Optional[str], event: EventBase, context: EventContext + ) -> None: + pass - homeserver.get_federation_event_handler()._check_event_auth = _check_event_auth + homeserver.get_federation_event_handler()._check_event_auth = _check_event_auth # type: ignore[assignment] return super().prepare(reactor, clock, homeserver) - def test_room_state_returned_when_knocking(self): + def test_room_state_returned_when_knocking(self) -> None: """ Tests that specific, stripped state events from a room are returned after a remote homeserver successfully knocks on a local room. diff --git a/tests/federation/transport/test_server.py b/tests/federation/transport/test_server.py index cfd550a04bcc..c4231f4aa972 100644 --- a/tests/federation/transport/test_server.py +++ b/tests/federation/transport/test_server.py @@ -20,7 +20,7 @@ class RoomDirectoryFederationTests(unittest.FederatingHomeserverTestCase): @override_config({"allow_public_rooms_over_federation": False}) - def test_blocked_public_room_list_over_federation(self): + def test_blocked_public_room_list_over_federation(self) -> None: """Test that unauthenticated requests to the public rooms directory 403 when allow_public_rooms_over_federation is False. """ @@ -31,7 +31,7 @@ def test_blocked_public_room_list_over_federation(self): self.assertEqual(403, channel.code) @override_config({"allow_public_rooms_over_federation": True}) - def test_open_public_room_list_over_federation(self): + def test_open_public_room_list_over_federation(self) -> None: """Test that unauthenticated requests to the public rooms directory 200 when allow_public_rooms_over_federation is True. """ @@ -42,7 +42,7 @@ def test_open_public_room_list_over_federation(self): self.assertEqual(200, channel.code) @DEBUG - def test_edu_debugging_doesnt_explode(self): + def test_edu_debugging_doesnt_explode(self) -> None: """Sanity check incoming federation succeeds with `synapse.debug_8631` enabled. Remove this when we strip out issue_8631_logger. From d0fa217cd9c4243a234f60767523ea20a1ede4e0 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 6 Feb 2023 11:11:09 -0500 Subject: [PATCH 131/344] Add missing types to test_state. (#14985) --- changelog.d/14985.misc | 1 + mypy.ini | 4 +- tests/test_state.py | 198 ++++++++++++++++++++++++++++------------- 3 files changed, 141 insertions(+), 62 deletions(-) create mode 100644 changelog.d/14985.misc diff --git a/changelog.d/14985.misc b/changelog.d/14985.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14985.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index 2f8806e1f45c..93de1c97ea00 100644 --- a/mypy.ini +++ b/mypy.ini @@ -38,7 +38,6 @@ exclude = (?x) |tests/module_api/test_api.py |tests/rest/media/v1/test_media_storage.py |tests/server.py - |tests/test_state.py |tests/test_terms_auth.py )$ @@ -117,6 +116,9 @@ disallow_untyped_defs = True [mypy-tests.test_server] disallow_untyped_defs = True +[mypy-tests.test_state] +disallow_untyped_defs = True + [mypy-tests.types.*] disallow_untyped_defs = True diff --git a/tests/test_state.py b/tests/test_state.py index 504530b49a8e..b20a26e1ffe4 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -11,7 +11,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Collection, Dict, List, Optional, cast +from typing import ( + Any, + Collection, + Dict, + Generator, + Iterable, + Iterator, + List, + Optional, + Set, + Tuple, + cast, +) from unittest.mock import Mock from twisted.internet import defer @@ -19,9 +31,11 @@ from synapse.api.auth import Auth from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.state import StateHandler, StateResolutionHandler, _make_state_cache_entry +from synapse.types import MutableStateMap, StateMap +from synapse.types.state import StateFilter from synapse.util import Clock from synapse.util.macaroons import MacaroonGenerator @@ -33,14 +47,14 @@ def create_event( - name=None, - type=None, - state_key=None, - depth=2, - event_id=None, - prev_events: Optional[List[str]] = None, - **kwargs, -): + name: Optional[str] = None, + type: Optional[str] = None, + state_key: Optional[str] = None, + depth: int = 2, + event_id: Optional[str] = None, + prev_events: Optional[List[Tuple[str, dict]]] = None, + **kwargs: Any, +) -> EventBase: global _next_event_id if not event_id: @@ -67,21 +81,21 @@ def create_event( d.update(kwargs) - event = make_event_from_dict(d) - - return event + return make_event_from_dict(d) class _DummyStore: - def __init__(self): - self._event_to_state_group = {} - self._group_to_state = {} + def __init__(self) -> None: + self._event_to_state_group: Dict[str, int] = {} + self._group_to_state: Dict[int, MutableStateMap[str]] = {} - self._event_id_to_event = {} + self._event_id_to_event: Dict[str, EventBase] = {} self._next_group = 1 - async def get_state_groups_ids(self, room_id, event_ids): + async def get_state_groups_ids( + self, room_id: str, event_ids: Collection[str] + ) -> Dict[int, MutableStateMap[str]]: groups = {} for event_id in event_ids: group = self._event_to_state_group.get(event_id) @@ -90,16 +104,25 @@ async def get_state_groups_ids(self, room_id, event_ids): return groups - async def get_state_ids_for_group(self, state_group, state_filter=None): + async def get_state_ids_for_group( + self, state_group: int, state_filter: Optional[StateFilter] = None + ) -> MutableStateMap[str]: return self._group_to_state[state_group] async def store_state_group( - self, event_id, room_id, prev_group, delta_ids, current_state_ids - ): + self, + event_id: str, + room_id: str, + prev_group: Optional[int], + delta_ids: Optional[StateMap[str]], + current_state_ids: Optional[StateMap[str]], + ) -> int: state_group = self._next_group self._next_group += 1 if 
current_state_ids is None: + assert prev_group is not None + assert delta_ids is not None current_state_ids = dict(self._group_to_state[prev_group]) current_state_ids.update(delta_ids) @@ -107,7 +130,9 @@ async def store_state_group( return state_group - async def get_events(self, event_ids, **kwargs): + async def get_events( + self, event_ids: Collection[str], **kwargs: Any + ) -> Dict[str, EventBase]: return { e_id: self._event_id_to_event[e_id] for e_id in event_ids @@ -119,31 +144,36 @@ async def get_partial_state_events( ) -> Dict[str, bool]: return {e: False for e in event_ids} - async def get_state_group_delta(self, name): + async def get_state_group_delta( + self, name: str + ) -> Tuple[Optional[int], Optional[StateMap[str]]]: return None, None - def register_events(self, events): + def register_events(self, events: Iterable[EventBase]) -> None: for e in events: self._event_id_to_event[e.event_id] = e - def register_event_context(self, event, context): + def register_event_context(self, event: EventBase, context: EventContext) -> None: + assert context.state_group is not None self._event_to_state_group[event.event_id] = context.state_group - def register_event_id_state_group(self, event_id, state_group): + def register_event_id_state_group(self, event_id: str, state_group: int) -> None: self._event_to_state_group[event_id] = state_group - async def get_room_version_id(self, room_id): + async def get_room_version_id(self, room_id: str) -> str: return RoomVersions.V1.identifier async def get_state_group_for_events( - self, event_ids, await_full_state: bool = True - ): + self, event_ids: Collection[str], await_full_state: bool = True + ) -> Dict[str, int]: res = {} for event in event_ids: res[event] = self._event_to_state_group[event] return res - async def get_state_for_groups(self, groups): + async def get_state_for_groups( + self, groups: Collection[int] + ) -> Dict[int, MutableStateMap[str]]: res = {} for group in groups: state = self._group_to_state[group] @@ -152,21 +182,21 @@ async def get_state_for_groups(self, groups): class DictObj(dict): - def __init__(self, **kwargs): + def __init__(self, **kwargs: Any) -> None: super().__init__(kwargs) self.__dict__ = self class Graph: - def __init__(self, nodes, edges): - events = {} - clobbered = set(events.keys()) + def __init__(self, nodes: Dict[str, DictObj], edges: Dict[str, List[str]]): + events: Dict[str, EventBase] = {} + clobbered: Set[str] = set() for event_id, fields in nodes.items(): refs = edges.get(event_id) if refs: clobbered.difference_update(refs) - prev_events = [(r, {}) for r in refs] + prev_events: List[Tuple[str, dict]] = [(r, {}) for r in refs] else: prev_events = [] @@ -177,15 +207,12 @@ def __init__(self, nodes, edges): self._leaves = clobbered self._events = sorted(events.values(), key=lambda e: e.depth) - def walk(self): + def walk(self) -> Iterator[EventBase]: return iter(self._events) - def get_leaves(self): - return (self._events[i] for i in self._leaves) - class StateTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.dummy_store = _DummyStore() storage_controllers = Mock(main=self.dummy_store, state=self.dummy_store) hs = Mock( @@ -220,7 +247,7 @@ def setUp(self): self.event_id = 0 @defer.inlineCallbacks - def test_branch_no_conflict(self): + def test_branch_no_conflict(self) -> Generator[defer.Deferred, Any, None]: graph = Graph( nodes={ "START": DictObj( @@ -248,6 +275,7 @@ def test_branch_no_conflict(self): ctx_c = context_store["C"] ctx_d = context_store["D"] + 
prev_state_ids: StateMap[str] prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids()) self.assertEqual(2, len(prev_state_ids)) @@ -255,7 +283,9 @@ def test_branch_no_conflict(self): self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) @defer.inlineCallbacks - def test_branch_basic_conflict(self): + def test_branch_basic_conflict( + self, + ) -> Generator["defer.Deferred[object]", Any, None]: graph = Graph( nodes={ "START": DictObj( @@ -280,7 +310,7 @@ def test_branch_basic_conflict(self): self.dummy_store.register_events(graph.walk()) - context_store = {} + context_store: Dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( @@ -294,6 +324,7 @@ def test_branch_basic_conflict(self): ctx_c = context_store["C"] ctx_d = context_store["D"] + prev_state_ids: StateMap[str] prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids()) self.assertSetEqual({"START", "A", "C"}, set(prev_state_ids.values())) @@ -301,7 +332,9 @@ def test_branch_basic_conflict(self): self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) @defer.inlineCallbacks - def test_branch_have_banned_conflict(self): + def test_branch_have_banned_conflict( + self, + ) -> Generator["defer.Deferred[object]", Any, None]: graph = Graph( nodes={ "START": DictObj( @@ -338,7 +371,7 @@ def test_branch_have_banned_conflict(self): self.dummy_store.register_events(graph.walk()) - context_store = {} + context_store: Dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( @@ -353,13 +386,16 @@ def test_branch_have_banned_conflict(self): ctx_c = context_store["C"] ctx_e = context_store["E"] + prev_state_ids: StateMap[str] prev_state_ids = yield defer.ensureDeferred(ctx_e.get_prev_state_ids()) self.assertSetEqual({"START", "A", "B", "C"}, set(prev_state_ids.values())) self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event) self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group) @defer.inlineCallbacks - def test_branch_have_perms_conflict(self): + def test_branch_have_perms_conflict( + self, + ) -> Generator["defer.Deferred[object]", Any, None]: userid1 = "@user_id:example.com" userid2 = "@user_id2:example.com" @@ -413,7 +449,7 @@ def test_branch_have_perms_conflict(self): self.dummy_store.register_events(graph.walk()) - context_store = {} + context_store: Dict[str, EventContext] = {} for event in graph.walk(): context = yield defer.ensureDeferred( @@ -428,14 +464,17 @@ def test_branch_have_perms_conflict(self): ctx_b = context_store["B"] ctx_d = context_store["D"] + prev_state_ids: StateMap[str] prev_state_ids = yield defer.ensureDeferred(ctx_d.get_prev_state_ids()) self.assertSetEqual({"A1", "A2", "A3", "A5", "B"}, set(prev_state_ids.values())) self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event) self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) - def _add_depths(self, nodes, edges): - def _get_depth(ev): + def _add_depths( + self, nodes: Dict[str, DictObj], edges: Dict[str, List[str]] + ) -> None: + def _get_depth(ev: str) -> int: node = nodes[ev] if "depth" not in node: prevs = edges[ev] @@ -447,7 +486,9 @@ def _get_depth(ev): _get_depth(n) @defer.inlineCallbacks - def test_annotate_with_old_message(self): + def test_annotate_with_old_message( + self, + ) -> Generator["defer.Deferred[object]", Any, None]: event = create_event(type="test_message", name="event") old_state = [ @@ -456,6 +497,7 @@ def test_annotate_with_old_message(self): 
create_event(type="test2", state_key=""), ] + context: EventContext context = yield defer.ensureDeferred( self.state.compute_event_context( event, @@ -466,9 +508,11 @@ def test_annotate_with_old_message(self): ) ) + prev_state_ids: StateMap[str] prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids()) self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values()) + current_state_ids: StateMap[str] current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids()) self.assertCountEqual( (e.event_id for e in old_state), current_state_ids.values() @@ -478,7 +522,9 @@ def test_annotate_with_old_message(self): self.assertEqual(context.state_group_before_event, context.state_group) @defer.inlineCallbacks - def test_annotate_with_old_state(self): + def test_annotate_with_old_state( + self, + ) -> Generator["defer.Deferred[object]", Any, None]: event = create_event(type="state", state_key="", name="event") old_state = [ @@ -487,6 +533,7 @@ def test_annotate_with_old_state(self): create_event(type="test2", state_key=""), ] + context: EventContext context = yield defer.ensureDeferred( self.state.compute_event_context( event, @@ -497,9 +544,11 @@ def test_annotate_with_old_state(self): ) ) + prev_state_ids: StateMap[str] prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids()) self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values()) + current_state_ids: StateMap[str] current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids()) self.assertCountEqual( (e.event_id for e in old_state + [event]), current_state_ids.values() @@ -511,7 +560,9 @@ def test_annotate_with_old_state(self): self.assertEqual({("state", ""): event.event_id}, context.delta_ids) @defer.inlineCallbacks - def test_trivial_annotate_message(self): + def test_trivial_annotate_message( + self, + ) -> Generator["defer.Deferred[object]", Any, None]: prev_event_id = "prev_event_id" event = create_event( type="test_message", name="event2", prev_events=[(prev_event_id, {})] @@ -534,8 +585,10 @@ def test_trivial_annotate_message(self): ) self.dummy_store.register_event_id_state_group(prev_event_id, group_name) + context: EventContext context = yield defer.ensureDeferred(self.state.compute_event_context(event)) + current_state_ids: StateMap[str] current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids()) self.assertEqual( @@ -545,7 +598,9 @@ def test_trivial_annotate_message(self): self.assertEqual(group_name, context.state_group) @defer.inlineCallbacks - def test_trivial_annotate_state(self): + def test_trivial_annotate_state( + self, + ) -> Generator["defer.Deferred[object]", Any, None]: prev_event_id = "prev_event_id" event = create_event( type="state", state_key="", name="event2", prev_events=[(prev_event_id, {})] @@ -568,8 +623,10 @@ def test_trivial_annotate_state(self): ) self.dummy_store.register_event_id_state_group(prev_event_id, group_name) + context: EventContext context = yield defer.ensureDeferred(self.state.compute_event_context(event)) + prev_state_ids: StateMap[str] prev_state_ids = yield defer.ensureDeferred(context.get_prev_state_ids()) self.assertEqual({e.event_id for e in old_state}, set(prev_state_ids.values())) @@ -577,7 +634,9 @@ def test_trivial_annotate_state(self): self.assertIsNotNone(context.state_group) @defer.inlineCallbacks - def test_resolve_message_conflict(self): + def test_resolve_message_conflict( + self, + ) -> Generator["defer.Deferred[Any]", Any, None]: prev_event_id1 = "event_id1" 
prev_event_id2 = "event_id2" event = create_event( @@ -605,10 +664,12 @@ def test_resolve_message_conflict(self): self.dummy_store.register_events(old_state_1) self.dummy_store.register_events(old_state_2) + context: EventContext context = yield self._get_context( event, prev_event_id1, old_state_1, prev_event_id2, old_state_2 ) + current_state_ids: StateMap[str] current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids()) self.assertEqual(len(current_state_ids), 6) @@ -616,7 +677,9 @@ def test_resolve_message_conflict(self): self.assertIsNotNone(context.state_group) @defer.inlineCallbacks - def test_resolve_state_conflict(self): + def test_resolve_state_conflict( + self, + ) -> Generator["defer.Deferred[Any]", Any, None]: prev_event_id1 = "event_id1" prev_event_id2 = "event_id2" event = create_event( @@ -645,12 +708,14 @@ def test_resolve_state_conflict(self): store = _DummyStore() store.register_events(old_state_1) store.register_events(old_state_2) - self.dummy_store.get_events = store.get_events + self.dummy_store.get_events = store.get_events # type: ignore[assignment] + context: EventContext context = yield self._get_context( event, prev_event_id1, old_state_1, prev_event_id2, old_state_2 ) + current_state_ids: StateMap[str] current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids()) self.assertEqual(len(current_state_ids), 6) @@ -658,7 +723,9 @@ def test_resolve_state_conflict(self): self.assertIsNotNone(context.state_group) @defer.inlineCallbacks - def test_standard_depth_conflict(self): + def test_standard_depth_conflict( + self, + ) -> Generator["defer.Deferred[Any]", Any, None]: prev_event_id1 = "event_id1" prev_event_id2 = "event_id2" event = create_event( @@ -700,12 +767,14 @@ def test_standard_depth_conflict(self): store = _DummyStore() store.register_events(old_state_1) store.register_events(old_state_2) - self.dummy_store.get_events = store.get_events + self.dummy_store.get_events = store.get_events # type: ignore[assignment] + context: EventContext context = yield self._get_context( event, prev_event_id1, old_state_1, prev_event_id2, old_state_2 ) + current_state_ids: StateMap[str] current_state_ids = yield defer.ensureDeferred(context.get_current_state_ids()) self.assertEqual(old_state_2[3].event_id, current_state_ids[("test1", "1")]) @@ -740,8 +809,14 @@ def test_standard_depth_conflict(self): @defer.inlineCallbacks def _get_context( - self, event, prev_event_id_1, old_state_1, prev_event_id_2, old_state_2 - ): + self, + event: EventBase, + prev_event_id_1: str, + old_state_1: Collection[EventBase], + prev_event_id_2: str, + old_state_2: Collection[EventBase], + ) -> Generator["defer.Deferred[object]", Any, EventContext]: + sg1: int sg1 = yield defer.ensureDeferred( self.dummy_store.store_state_group( prev_event_id_1, @@ -753,6 +828,7 @@ def _get_context( ) self.dummy_store.register_event_id_state_group(prev_event_id_1, sg1) + sg2: int sg2 = yield defer.ensureDeferred( self.dummy_store.store_state_group( prev_event_id_2, @@ -767,7 +843,7 @@ def _get_context( result = yield defer.ensureDeferred(self.state.compute_event_context(event)) return result - def test_make_state_cache_entry(self): + def test_make_state_cache_entry(self) -> None: "Test that calculating a prev_group and delta is correct" new_state = { From 64a631879c8be583efe47ca48e39f6cdf6115645 Mon Sep 17 00:00:00 2001 From: icp Date: Tue, 7 Feb 2023 01:04:14 +0530 Subject: [PATCH 132/344] Allow poetry-core 1.5.0 (#14949) --- changelog.d/14949.misc | 1 + pyproject.toml | 6 
++++-- 2 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14949.misc diff --git a/changelog.d/14949.misc b/changelog.d/14949.misc new file mode 100644 index 000000000000..793a78a5dd5c --- /dev/null +++ b/changelog.d/14949.misc @@ -0,0 +1 @@ +Update build system requirements to allow building with poetry-core==1.5.0. diff --git a/pyproject.toml b/pyproject.toml index 8f7ced99a2ca..12734dec710e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -127,7 +127,9 @@ exclude = [ { path = "synapse/*.so", format = "sdist"} ] -build = "build_rust.py" +[tool.poetry.build] +script = "build_rust.py" +generate-setup-file = true [tool.poetry.scripts] synapse_homeserver = "synapse.app.homeserver:main" @@ -350,7 +352,7 @@ towncrier = ">=18.6.0rc1" # system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). -requires = ["poetry-core>=1.0.0,<=1.3.2", "setuptools_rust>=1.3,<=1.5.2"] +requires = ["poetry-core>=1.0.0,<=1.5.0", "setuptools_rust>=1.3,<=1.5.2"] build-backend = "poetry.core.masonry.api" From 5fdc12f482c68e2cdbb78d7db5de2cfe621720d4 Mon Sep 17 00:00:00 2001 From: Nick Mills-Barrett Date: Tue, 7 Feb 2023 01:10:54 +0100 Subject: [PATCH 133/344] Add `event_stream_ordering` column to membership state tables (#14979) This adds an `event_stream_ordering` column to `current_state_events`, `local_current_membership` and `room_memberships`. Each of these tables is regularly joined with the `events` table to get the stream ordering and denormalising this into each table will yield significant query performance improvements once used. Includes a background job to populate these values from the `events` table. Same idea as https://github.com/matrix-org/synapse/pull/13703. Signed off by Nick @ Beeper (@fizzadar). --- changelog.d/14979.misc | 1 + synapse/storage/databases/main/events.py | 23 +++- .../databases/main/events_bg_updates.py | 104 +++++++++++++++++- .../storage/databases/main/events_worker.py | 8 +- ...embership_tables_event_stream_ordering.sql | 21 ++++ 5 files changed, 146 insertions(+), 11 deletions(-) create mode 100644 changelog.d/14979.misc create mode 100644 synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql diff --git a/changelog.d/14979.misc b/changelog.d/14979.misc new file mode 100644 index 000000000000..c09911e48d2f --- /dev/null +++ b/changelog.d/14979.misc @@ -0,0 +1 @@ +Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1536937b67e8..b6cce0a7ccf1 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1147,11 +1147,15 @@ def _update_current_state_txn( # been inserted into room_memberships. txn.execute_batch( """INSERT INTO current_state_events - (room_id, type, state_key, event_id, membership) - VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) + (room_id, type, state_key, event_id, membership, event_stream_ordering) + VALUES ( + ?, ?, ?, ?, + (SELECT membership FROM room_memberships WHERE event_id = ?), + (SELECT stream_ordering FROM events WHERE event_id = ?) 
+ ) """, [ - (room_id, key[0], key[1], ev_id, ev_id) + (room_id, key[0], key[1], ev_id, ev_id, ev_id) for key, ev_id in to_insert.items() ], ) @@ -1178,11 +1182,15 @@ def _update_current_state_txn( if to_insert: txn.execute_batch( """INSERT INTO local_current_membership - (room_id, user_id, event_id, membership) - VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) + (room_id, user_id, event_id, membership, event_stream_ordering) + VALUES ( + ?, ?, ?, + (SELECT membership FROM room_memberships WHERE event_id = ?), + (SELECT stream_ordering FROM events WHERE event_id = ?) + ) """, [ - (room_id, key[1], ev_id, ev_id) + (room_id, key[1], ev_id, ev_id, ev_id) for key, ev_id in to_insert.items() if key[0] == EventTypes.Member and self.is_mine_id(key[1]) ], @@ -1790,6 +1798,7 @@ def _store_room_members_txn( table="room_memberships", keys=( "event_id", + "event_stream_ordering", "user_id", "sender", "room_id", @@ -1800,6 +1809,7 @@ def _store_room_members_txn( values=[ ( event.event_id, + event.internal_metadata.stream_ordering, event.state_key, event.user_id, event.room_id, @@ -1832,6 +1842,7 @@ def _store_room_members_txn( keyvalues={"room_id": event.room_id, "user_id": event.state_key}, values={ "event_id": event.event_id, + "event_stream_ordering": event.internal_metadata.stream_ordering, "membership": event.membership, }, ) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index b9d3c36d602c..0e81d38ccaee 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -17,7 +17,7 @@ import attr -from synapse.api.constants import EventContentFields, RelationTypes +from synapse.api.constants import EventContentFields, EventTypes, RelationTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import make_event_from_dict from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -71,6 +71,10 @@ class _BackgroundUpdates: EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" + POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING = ( + "populate_membership_event_stream_ordering" + ) + @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: @@ -99,6 +103,10 @@ def __init__( ): super().__init__(database, db_conn, hs) + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING, + self._populate_membership_event_stream_ordering, + ) self.db_pool.updates.register_background_update_handler( _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts, @@ -1498,3 +1506,97 @@ def _populate_txn(txn: LoggingTransaction) -> bool: ) return batch_size + + async def _populate_membership_event_stream_ordering( + self, progress: JsonDict, batch_size: int + ) -> int: + def _populate_membership_event_stream_ordering( + txn: LoggingTransaction, + ) -> bool: + + if "max_stream_ordering" in progress: + max_stream_ordering = progress["max_stream_ordering"] + else: + txn.execute("SELECT max(stream_ordering) FROM events") + res = txn.fetchone() + if res is None or res[0] is None: + return True + else: + max_stream_ordering = res[0] + + start = progress.get("stream_ordering", 0) + stop = start + batch_size + + sql = f""" + SELECT room_id, event_id, stream_ordering + FROM events + WHERE + type = '{EventTypes.Member}' + AND stream_ordering >= ? + AND stream_ordering < ? 
+ """ + txn.execute(sql, (start, stop)) + + rows: List[Tuple[str, str, int]] = cast( + List[Tuple[str, str, int]], txn.fetchall() + ) + + event_ids: List[Tuple[str]] = [] + event_stream_orderings: List[Tuple[int]] = [] + + for _, event_id, event_stream_ordering in rows: + event_ids.append((event_id,)) + event_stream_orderings.append((event_stream_ordering,)) + + self.db_pool.simple_update_many_txn( + txn, + table="current_state_events", + key_names=("event_id",), + key_values=event_ids, + value_names=("event_stream_ordering",), + value_values=event_stream_orderings, + ) + + self.db_pool.simple_update_many_txn( + txn, + table="room_memberships", + key_names=("event_id",), + key_values=event_ids, + value_names=("event_stream_ordering",), + value_values=event_stream_orderings, + ) + + # NOTE: local_current_membership has no index on event_id, so only + # the room ID here will reduce the query rows read. + for room_id, event_id, event_stream_ordering in rows: + txn.execute( + """ + UPDATE local_current_membership + SET event_stream_ordering = ? + WHERE room_id = ? AND event_id = ? + """, + (event_stream_ordering, room_id, event_id), + ) + + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING, + { + "stream_ordering": stop, + "max_stream_ordering": max_stream_ordering, + }, + ) + + return stop > max_stream_ordering + + finished = await self.db_pool.runInteraction( + "_populate_membership_event_stream_ordering", + _populate_membership_event_stream_ordering, + ) + + if finished: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING + ) + + return batch_size diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index d7d08369cacd..6d0ef10258de 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1779,7 +1779,7 @@ def get_ex_outlier_stream_rows_txn( txn: LoggingTransaction, ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( - "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," + "SELECT out.event_stream_ordering, e.event_id, e.room_id, e.type," " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," " e.outlier" " FROM events AS e" @@ -1791,10 +1791,10 @@ def get_ex_outlier_stream_rows_txn( " LEFT JOIN event_relations USING (event_id)" " LEFT JOIN room_memberships USING (event_id)" " LEFT JOIN rejections USING (event_id)" - " WHERE ? < event_stream_ordering" - " AND event_stream_ordering <= ?" + " WHERE ? < out.event_stream_ordering" + " AND out.event_stream_ordering <= ?" " AND out.instance_name = ?" - " ORDER BY event_stream_ordering ASC" + " ORDER BY out.event_stream_ordering ASC" ) txn.execute(sql, (last_id, current_id, instance_name)) diff --git a/synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql b/synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql new file mode 100644 index 000000000000..7c30a67fc4d1 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql @@ -0,0 +1,21 @@ +/* Copyright 2022 Beeper + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE current_state_events ADD COLUMN event_stream_ordering BIGINT; +ALTER TABLE local_current_membership ADD COLUMN event_stream_ordering BIGINT; +ALTER TABLE room_memberships ADD COLUMN event_stream_ordering BIGINT; + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('populate_membership_event_stream_ordering', '{}'); From d0fed7a37b8b6ce166cae856fe243757aa7c7294 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Feb 2023 00:20:04 +0000 Subject: [PATCH 134/344] Properly typecheck types.http (#14988) * Tweak http types in Synapse AFACIS these are correct, and they make mypy happier on tests.http. * Type hints for test_proxyagent * type hints for test_srv_resolver * test_matrix_federation_agent * tests.http.server._base * tests.http.__init__ * tests.http.test_additional_resource * tests.http.test_client * tests.http.test_endpoint * tests.http.test_matrixfederationclient * tests.http.test_servlet * tests.http.test_simple_client * tests.http.test_site * One fixup in tests.server * Untyped defs * Changelog * Fixup syntax for Python 3.7 * Fix olddeps syntax * Use a twisted IPv4 addr for dummy_address * Fix typo, thanks Sean Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Remove redundant `Optional` --------- Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/14988.misc | 1 + mypy.ini | 6 +- synapse/http/client.py | 5 +- synapse/http/proxyagent.py | 3 +- tests/http/__init__.py | 19 ++- .../test_matrix_federation_agent.py | 142 +++++++++++------- tests/http/federation/test_srv_resolver.py | 60 +++++--- tests/http/server/_base.py | 2 +- tests/http/test_additional_resource.py | 18 ++- tests/http/test_client.py | 37 +++-- tests/http/test_endpoint.py | 4 +- tests/http/test_matrixfederationclient.py | 53 ++++--- tests/http/test_proxyagent.py | 103 ++++++++----- tests/http/test_servlet.py | 8 +- tests/http/test_simple_client.py | 14 +- tests/http/test_site.py | 8 +- tests/server.py | 6 +- 17 files changed, 298 insertions(+), 191 deletions(-) create mode 100644 changelog.d/14988.misc diff --git a/changelog.d/14988.misc b/changelog.d/14988.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/14988.misc @@ -0,0 +1 @@ +Improve type hints. 
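One recurring change in the diffs below is worth spelling out: once these tests are fully typed, `IProtocolFactory.buildProtocol` can no longer be handed `None`, because its signature takes an `IAddress`. The patch therefore introduces a shared `dummy_address` (a throwaway `IPv4Address`) in tests/http/__init__.py and threads it through the agent tests. A minimal standalone sketch of the idea, not taken from the patch itself:

    from twisted.internet.address import IPv4Address
    from twisted.internet.protocol import Factory, Protocol

    # Any IAddress will do for tests that wire protocols together with a fake
    # transport and never inspect the peer address.
    dummy_address = IPv4Address("TCP", "127.0.0.1", 80)

    factory = Factory.forProtocol(Protocol)
    # Previously written as factory.buildProtocol(None), which mypy rejects once
    # the factory is typed as IProtocolFactory.
    proto = factory.buildProtocol(dummy_address)
    assert proto is not None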
diff --git a/mypy.ini b/mypy.ini index 93de1c97ea00..11e683b7042c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,9 +32,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/schema/ - |tests/http/federation/test_matrix_federation_agent.py - |tests/http/federation/test_srv_resolver.py - |tests/http/test_proxyagent.py |tests/module_api/test_api.py |tests/rest/media/v1/test_media_storage.py |tests/server.py @@ -92,6 +89,9 @@ disallow_untyped_defs = True [mypy-tests.handlers.*] disallow_untyped_defs = True +[mypy-tests.http.*] +disallow_untyped_defs = True + [mypy-tests.logging.*] disallow_untyped_defs = True diff --git a/synapse/http/client.py b/synapse/http/client.py index 4eb740c04020..a05f297933ca 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -44,6 +44,7 @@ IAddress, IDelayedCall, IHostResolution, + IReactorCore, IReactorPluggableNameResolver, IReactorTime, IResolutionReceiver, @@ -226,7 +227,9 @@ def resolutionComplete() -> None: return recv -@implementer(ISynapseReactor) +# ISynapseReactor implies IReactorCore, but explicitly marking it this as an implementer +# of IReactorCore seems to keep mypy-zope happier. +@implementer(IReactorCore, ISynapseReactor) class BlacklistingReactorWrapper: """ A Reactor wrapper which will prevent DNS resolution to blacklisted IP diff --git a/synapse/http/proxyagent.py b/synapse/http/proxyagent.py index 18899bc6d18d..94ef737b9ee0 100644 --- a/synapse/http/proxyagent.py +++ b/synapse/http/proxyagent.py @@ -38,7 +38,6 @@ from synapse.http import redact_uri from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials -from synapse.types import ISynapseReactor logger = logging.getLogger(__name__) @@ -84,7 +83,7 @@ class ProxyAgent(_AgentBase): def __init__( self, reactor: IReactorCore, - proxy_reactor: Optional[ISynapseReactor] = None, + proxy_reactor: Optional[IReactorCore] = None, contextFactory: Optional[IPolicyForHTTPS] = None, connectTimeout: Optional[float] = None, bindAddress: Optional[bytes] = None, diff --git a/tests/http/__init__.py b/tests/http/__init__.py index 093537adef52..528cdee34b8b 100644 --- a/tests/http/__init__.py +++ b/tests/http/__init__.py @@ -19,13 +19,15 @@ from OpenSSL import SSL from OpenSSL.SSL import Connection +from twisted.internet.address import IPv4Address from twisted.internet.interfaces import IOpenSSLServerConnectionCreator from twisted.internet.ssl import Certificate, trustRootFromCertificates +from twisted.protocols.tls import TLSMemoryBIOProtocol from twisted.web.client import BrowserLikePolicyForHTTPS # noqa: F401 from twisted.web.iweb import IPolicyForHTTPS # noqa: F401 -def get_test_https_policy(): +def get_test_https_policy() -> BrowserLikePolicyForHTTPS: """Get a test IPolicyForHTTPS which trusts the test CA cert Returns: @@ -39,7 +41,7 @@ def get_test_https_policy(): return BrowserLikePolicyForHTTPS(trustRoot=trust_root) -def get_test_ca_cert_file(): +def get_test_ca_cert_file() -> str: """Get the path to the test CA cert The keypair is generated with: @@ -51,7 +53,7 @@ def get_test_ca_cert_file(): return os.path.join(os.path.dirname(__file__), "ca.crt") -def get_test_key_file(): +def get_test_key_file() -> str: """get the path to the test key The key file is made with: @@ -137,15 +139,20 @@ class TestServerTLSConnectionFactory: """An SSL connection creator which returns connections which present a certificate signed by our test CA.""" - def __init__(self, sanlist): + def __init__(self, sanlist: List[bytes]): """ Args: - sanlist: list[bytes]: a list 
of subjectAltName values for the cert + sanlist: a list of subjectAltName values for the cert """ self._cert_file = create_test_cert_file(sanlist) - def serverConnectionForTLS(self, tlsProtocol): + def serverConnectionForTLS(self, tlsProtocol: TLSMemoryBIOProtocol) -> Connection: ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_certificate_file(self._cert_file) ctx.use_privatekey_file(get_test_key_file()) return Connection(ctx, None) + + +# A dummy address, useful for tests that use FakeTransport and don't care about where +# packets are going to/coming from. +dummy_address = IPv4Address("TCP", "127.0.0.1", 80) diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index 992d8f94fd70..acfdcd3bca25 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -14,7 +14,7 @@ import base64 import logging import os -from typing import Iterable, Optional +from typing import Any, Awaitable, Callable, Generator, List, Optional, cast from unittest.mock import Mock, patch import treq @@ -24,14 +24,19 @@ from twisted.internet import defer from twisted.internet._sslverify import ClientTLSOptions, OpenSSLCertificateOptions -from twisted.internet.interfaces import IProtocolFactory +from twisted.internet.defer import Deferred +from twisted.internet.endpoints import _WrappingProtocol +from twisted.internet.interfaces import ( + IOpenSSLClientConnectionCreator, + IProtocolFactory, +) from twisted.internet.protocol import Factory from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web._newclient import ResponseNeverReceived from twisted.web.client import Agent from twisted.web.http import HTTPChannel, Request from twisted.web.http_headers import Headers -from twisted.web.iweb import IPolicyForHTTPS +from twisted.web.iweb import IPolicyForHTTPS, IResponse from synapse.config.homeserver import HomeServerConfig from synapse.crypto.context_factory import FederationPolicyForHTTPS @@ -42,11 +47,21 @@ WellKnownResolver, _cache_period_from_headers, ) -from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context +from synapse.logging.context import ( + SENTINEL_CONTEXT, + LoggingContext, + LoggingContextOrSentinel, + current_context, +) +from synapse.types import ISynapseReactor from synapse.util.caches.ttlcache import TTLCache from tests import unittest -from tests.http import TestServerTLSConnectionFactory, get_test_ca_cert_file +from tests.http import ( + TestServerTLSConnectionFactory, + dummy_address, + get_test_ca_cert_file, +) from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.utils import default_config @@ -54,15 +69,17 @@ # Once Async Mocks or lambdas are supported this can go away. 
-def generate_resolve_service(result): - async def resolve_service(_): +def generate_resolve_service( + result: List[Server], +) -> Callable[[Any], Awaitable[List[Server]]]: + async def resolve_service(_: Any) -> List[Server]: return result return resolve_service class MatrixFederationAgentTests(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() self.mock_resolver = Mock() @@ -75,8 +92,12 @@ def setUp(self): self.tls_factory = FederationPolicyForHTTPS(config) - self.well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) - self.had_well_known_cache = TTLCache("test_cache", timer=self.reactor.seconds) + self.well_known_cache: TTLCache[bytes, Optional[bytes]] = TTLCache( + "test_cache", timer=self.reactor.seconds + ) + self.had_well_known_cache: TTLCache[bytes, bool] = TTLCache( + "test_cache", timer=self.reactor.seconds + ) self.well_known_resolver = WellKnownResolver( self.reactor, Agent(self.reactor, contextFactory=self.tls_factory), @@ -89,8 +110,8 @@ def _make_connection( self, client_factory: IProtocolFactory, ssl: bool = True, - expected_sni: bytes = None, - tls_sanlist: Optional[Iterable[bytes]] = None, + expected_sni: Optional[bytes] = None, + tls_sanlist: Optional[List[bytes]] = None, ) -> HTTPChannel: """Builds a test server, and completes the outgoing client connection Args: @@ -116,8 +137,8 @@ def _make_connection( if ssl: server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist) - server_protocol = server_factory.buildProtocol(None) - + server_protocol = server_factory.buildProtocol(dummy_address) + assert server_protocol is not None # now, tell the client protocol factory to build the client protocol (it will be a # _WrappingProtocol, around a TLSMemoryBIOProtocol, around an # HTTP11ClientProtocol) and wire the output of said protocol up to the server via @@ -125,7 +146,8 @@ def _make_connection( # # Normally this would be done by the TCP socket code in Twisted, but we are # stubbing that out here. - client_protocol = client_factory.buildProtocol(None) + client_protocol = client_factory.buildProtocol(dummy_address) + assert isinstance(client_protocol, _WrappingProtocol) client_protocol.makeConnection( FakeTransport(server_protocol, self.reactor, client_protocol) ) @@ -136,6 +158,7 @@ def _make_connection( ) if ssl: + assert isinstance(server_protocol, TLSMemoryBIOProtocol) # fish the test server back out of the server-side TLS protocol. 
http_protocol = server_protocol.wrappedProtocol # grab a hold of the TLS connection, in case it gets torn down @@ -144,6 +167,7 @@ def _make_connection( http_protocol = server_protocol tls_connection = None + assert isinstance(http_protocol, HTTPChannel) # give the reactor a pump to get the TLS juices flowing (if needed) self.reactor.advance(0) @@ -159,12 +183,14 @@ def _make_connection( return http_protocol @defer.inlineCallbacks - def _make_get_request(self, uri: bytes): + def _make_get_request( + self, uri: bytes + ) -> Generator["Deferred[object]", object, IResponse]: """ Sends a simple GET request via the agent, and checks its logcontext management """ with LoggingContext("one") as context: - fetch_d = self.agent.request(b"GET", uri) + fetch_d: Deferred[IResponse] = self.agent.request(b"GET", uri) # Nothing happened yet self.assertNoResult(fetch_d) @@ -172,8 +198,9 @@ def _make_get_request(self, uri: bytes): # should have reset logcontext to the sentinel _check_logcontext(SENTINEL_CONTEXT) + fetch_res: IResponse try: - fetch_res = yield fetch_d + fetch_res = yield fetch_d # type: ignore[misc, assignment] return fetch_res except Exception as e: logger.info("Fetch of %s failed: %s", uri.decode("ascii"), e) @@ -216,7 +243,7 @@ def _send_well_known_response( request: Request, content: bytes, headers: Optional[dict] = None, - ): + ) -> None: """Check that an incoming request looks like a valid .well-known request, and send back the response. """ @@ -237,16 +264,16 @@ def _make_agent(self) -> MatrixFederationAgent: because it is created too early during setUp """ return MatrixFederationAgent( - reactor=self.reactor, + reactor=cast(ISynapseReactor, self.reactor), tls_client_options_factory=self.tls_factory, - user_agent="test-agent", # Note that this is unused since _well_known_resolver is provided. + user_agent=b"test-agent", # Note that this is unused since _well_known_resolver is provided. 
ip_whitelist=IPSet(), ip_blacklist=IPSet(), _srv_resolver=self.mock_resolver, _well_known_resolver=self.well_known_resolver, ) - def test_get(self): + def test_get(self) -> None: """happy-path test of a GET request with an explicit port""" self._do_get() @@ -254,11 +281,11 @@ def test_get(self): os.environ, {"https_proxy": "proxy.com", "no_proxy": "testserv"}, ) - def test_get_bypass_proxy(self): + def test_get_bypass_proxy(self) -> None: """test of a GET request with an explicit port and bypass proxy""" self._do_get() - def _do_get(self): + def _do_get(self) -> None: """test of a GET request with an explicit port""" self.agent = self._make_agent() @@ -318,7 +345,7 @@ def _do_get(self): @patch.dict( os.environ, {"https_proxy": "http://proxy.com", "no_proxy": "unused.com"} ) - def test_get_via_http_proxy(self): + def test_get_via_http_proxy(self) -> None: """test for federation request through a http proxy""" self._do_get_via_proxy(expect_proxy_ssl=False, expected_auth_credentials=None) @@ -326,7 +353,7 @@ def test_get_via_http_proxy(self): os.environ, {"https_proxy": "http://user:pass@proxy.com", "no_proxy": "unused.com"}, ) - def test_get_via_http_proxy_with_auth(self): + def test_get_via_http_proxy_with_auth(self) -> None: """test for federation request through a http proxy with authentication""" self._do_get_via_proxy( expect_proxy_ssl=False, expected_auth_credentials=b"user:pass" @@ -335,7 +362,7 @@ def test_get_via_http_proxy_with_auth(self): @patch.dict( os.environ, {"https_proxy": "https://proxy.com", "no_proxy": "unused.com"} ) - def test_get_via_https_proxy(self): + def test_get_via_https_proxy(self) -> None: """test for federation request through a https proxy""" self._do_get_via_proxy(expect_proxy_ssl=True, expected_auth_credentials=None) @@ -343,7 +370,7 @@ def test_get_via_https_proxy(self): os.environ, {"https_proxy": "https://user:pass@proxy.com", "no_proxy": "unused.com"}, ) - def test_get_via_https_proxy_with_auth(self): + def test_get_via_https_proxy_with_auth(self) -> None: """test for federation request through a https proxy with authentication""" self._do_get_via_proxy( expect_proxy_ssl=True, expected_auth_credentials=b"user:pass" @@ -353,7 +380,7 @@ def _do_get_via_proxy( self, expect_proxy_ssl: bool = False, expected_auth_credentials: Optional[bytes] = None, - ): + ) -> None: """Send a https federation request via an agent and check that it is correctly received at the proxy and client. The proxy can use either http or https. Args: @@ -418,10 +445,12 @@ def _do_get_via_proxy( # now we make another test server to act as the upstream HTTP server. server_ssl_protocol = _wrap_server_factory_for_tls( _get_test_protocol_factory() - ).buildProtocol(None) + ).buildProtocol(dummy_address) + assert isinstance(server_ssl_protocol, TLSMemoryBIOProtocol) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. proxy_server_transport = proxy_server.transport + assert proxy_server_transport is not None server_ssl_protocol.makeConnection(proxy_server_transport) # ... 
and replace the protocol on the proxy's transport with the @@ -451,6 +480,7 @@ def _do_get_via_proxy( # now there should be a pending request http_server = server_ssl_protocol.wrappedProtocol + assert isinstance(http_server, HTTPChannel) self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] @@ -491,7 +521,7 @@ def _do_get_via_proxy( json = self.successResultOf(treq.json_content(response)) self.assertEqual(json, {"a": 1}) - def test_get_ip_address(self): + def test_get_ip_address(self) -> None: """ Test the behaviour when the server name contains an explicit IP (with no port) """ @@ -526,7 +556,7 @@ def test_get_ip_address(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_get_ipv6_address(self): + def test_get_ipv6_address(self) -> None: """ Test the behaviour when the server name contains an explicit IPv6 address (with no port) @@ -562,7 +592,7 @@ def test_get_ipv6_address(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_get_ipv6_address_with_port(self): + def test_get_ipv6_address_with_port(self) -> None: """ Test the behaviour when the server name contains an explicit IPv6 address (with explicit port) @@ -598,7 +628,7 @@ def test_get_ipv6_address_with_port(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_get_hostname_bad_cert(self): + def test_get_hostname_bad_cert(self) -> None: """ Test the behaviour when the certificate on the server doesn't match the hostname """ @@ -651,7 +681,7 @@ def test_get_hostname_bad_cert(self): failure_reason = e.value.reasons[0] self.assertIsInstance(failure_reason.value, VerificationError) - def test_get_ip_address_bad_cert(self): + def test_get_ip_address_bad_cert(self) -> None: """ Test the behaviour when the server name contains an explicit IP, but the server cert doesn't cover it @@ -684,7 +714,7 @@ def test_get_ip_address_bad_cert(self): failure_reason = e.value.reasons[0] self.assertIsInstance(failure_reason.value, VerificationError) - def test_get_no_srv_no_well_known(self): + def test_get_no_srv_no_well_known(self) -> None: """ Test the behaviour when the server name has no port, no SRV, and no well-known """ @@ -740,7 +770,7 @@ def test_get_no_srv_no_well_known(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_get_well_known(self): + def test_get_well_known(self) -> None: """Test the behaviour when the .well-known delegates elsewhere""" self.agent = self._make_agent() @@ -802,7 +832,7 @@ def test_get_well_known(self): self.well_known_cache.expire() self.assertNotIn(b"testserv", self.well_known_cache) - def test_get_well_known_redirect(self): + def test_get_well_known_redirect(self) -> None: """Test the behaviour when the server name has no port and no SRV record, but the .well-known has a 300 redirect """ @@ -892,7 +922,7 @@ def test_get_well_known_redirect(self): self.well_known_cache.expire() self.assertNotIn(b"testserv", self.well_known_cache) - def test_get_invalid_well_known(self): + def test_get_invalid_well_known(self) -> None: """ Test the behaviour when the server name has an *invalid* well-known (and no SRV) """ @@ -945,7 +975,7 @@ def test_get_invalid_well_known(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_get_well_known_unsigned_cert(self): + def test_get_well_known_unsigned_cert(self) -> None: """Test the behaviour when the .well-known server presents a cert not signed by a CA """ @@ -969,7 +999,7 @@ def test_get_well_known_unsigned_cert(self): ip_blacklist=IPSet(), 
_srv_resolver=self.mock_resolver, _well_known_resolver=WellKnownResolver( - self.reactor, + cast(ISynapseReactor, self.reactor), Agent(self.reactor, contextFactory=tls_factory), b"test-agent", well_known_cache=self.well_known_cache, @@ -999,7 +1029,7 @@ def test_get_well_known_unsigned_cert(self): b"_matrix._tcp.testserv" ) - def test_get_hostname_srv(self): + def test_get_hostname_srv(self) -> None: """ Test the behaviour when there is a single SRV record """ @@ -1041,7 +1071,7 @@ def test_get_hostname_srv(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_get_well_known_srv(self): + def test_get_well_known_srv(self) -> None: """Test the behaviour when the .well-known redirects to a place where there is a SRV. """ @@ -1101,7 +1131,7 @@ def test_get_well_known_srv(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_idna_servername(self): + def test_idna_servername(self) -> None: """test the behaviour when the server name has idna chars in""" self.agent = self._make_agent() @@ -1163,7 +1193,7 @@ def test_idna_servername(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_idna_srv_target(self): + def test_idna_srv_target(self) -> None: """test the behaviour when the target of a SRV record has idna chars""" self.agent = self._make_agent() @@ -1206,7 +1236,7 @@ def test_idna_srv_target(self): self.reactor.pump((0.1,)) self.successResultOf(test_d) - def test_well_known_cache(self): + def test_well_known_cache(self) -> None: self.reactor.lookups["testserv"] = "1.2.3.4" fetch_d = defer.ensureDeferred( @@ -1262,7 +1292,7 @@ def test_well_known_cache(self): r = self.successResultOf(fetch_d) self.assertEqual(r.delegated_server, b"other-server") - def test_well_known_cache_with_temp_failure(self): + def test_well_known_cache_with_temp_failure(self) -> None: """Test that we refetch well-known before the cache expires, and that it ignores transient errors. 
""" @@ -1341,7 +1371,7 @@ def test_well_known_cache_with_temp_failure(self): r = self.successResultOf(fetch_d) self.assertEqual(r.delegated_server, None) - def test_well_known_too_large(self): + def test_well_known_too_large(self) -> None: """A well-known query that returns a result which is too large should be rejected.""" self.reactor.lookups["testserv"] = "1.2.3.4" @@ -1367,7 +1397,7 @@ def test_well_known_too_large(self): r = self.successResultOf(fetch_d) self.assertIsNone(r.delegated_server) - def test_srv_fallbacks(self): + def test_srv_fallbacks(self) -> None: """Test that other SRV results are tried if the first one fails.""" self.agent = self._make_agent() @@ -1427,7 +1457,7 @@ def test_srv_fallbacks(self): class TestCachePeriodFromHeaders(unittest.TestCase): - def test_cache_control(self): + def test_cache_control(self) -> None: # uppercase self.assertEqual( _cache_period_from_headers( @@ -1464,7 +1494,7 @@ def test_cache_control(self): 0, ) - def test_expires(self): + def test_expires(self) -> None: self.assertEqual( _cache_period_from_headers( Headers({b"Expires": [b"Wed, 30 Jan 2019 07:35:33 GMT"]}), @@ -1491,14 +1521,14 @@ def test_expires(self): self.assertEqual(_cache_period_from_headers(Headers({b"Expires": [b"0"]})), 0) -def _check_logcontext(context): +def _check_logcontext(context: LoggingContextOrSentinel) -> None: current = current_context() if current is not context: raise AssertionError("Expected logcontext %s but was %s" % (context, current)) def _wrap_server_factory_for_tls( - factory: IProtocolFactory, sanlist: Iterable[bytes] = None + factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None ) -> IProtocolFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate @@ -1537,7 +1567,7 @@ def _get_test_protocol_factory() -> IProtocolFactory: return server_factory -def _log_request(request: str): +def _log_request(request: str) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info(f"Completed request {request}") @@ -1547,6 +1577,8 @@ class TrustingTLSPolicyForHTTPS: """An IPolicyForHTTPS which checks that the certificate belongs to the right server, but doesn't check the certificate chain.""" - def creatorForNetloc(self, hostname, port): + def creatorForNetloc( + self, hostname: bytes, port: int + ) -> IOpenSSLClientConnectionCreator: certificateOptions = OpenSSLCertificateOptions() return ClientTLSOptions(hostname, certificateOptions.getContext()) diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 77ce8432ac53..7748f56ee6e2 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +from typing import Dict, Generator, List, Tuple, cast from unittest.mock import Mock from twisted.internet import defer @@ -20,7 +20,7 @@ from twisted.internet.error import ConnectError from twisted.names import dns, error -from synapse.http.federation.srv_resolver import SrvResolver +from synapse.http.federation.srv_resolver import Server, SrvResolver from synapse.logging.context import LoggingContext, current_context from tests import unittest @@ -28,7 +28,7 @@ class SrvResolverTestCase(unittest.TestCase): - def test_resolve(self): + def test_resolve(self) -> None: dns_client_mock = Mock() service_name = b"test_service.example.com" @@ -38,18 +38,19 @@ def test_resolve(self): type=dns.SRV, payload=dns.Record_SRV(target=host_name) ) - result_deferred = Deferred() + result_deferred: "Deferred[Tuple[List[dns.RRHeader], None, None]]" = Deferred() dns_client_mock.lookupService.return_value = result_deferred - cache = {} + cache: Dict[bytes, List[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) @defer.inlineCallbacks - def do_lookup(): + def do_lookup() -> Generator["Deferred[object]", object, List[Server]]: with LoggingContext("one") as ctx: resolve_d = resolver.resolve_service(service_name) - result = yield defer.ensureDeferred(resolve_d) + result: List[Server] + result = yield defer.ensureDeferred(resolve_d) # type: ignore[assignment] # should have restored our context self.assertIs(current_context(), ctx) @@ -70,7 +71,9 @@ def do_lookup(): self.assertEqual(servers[0].host, host_name) @defer.inlineCallbacks - def test_from_cache_expired_and_dns_fail(self): + def test_from_cache_expired_and_dns_fail( + self, + ) -> Generator["Deferred[object]", object, None]: dns_client_mock = Mock() dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError()) @@ -81,10 +84,13 @@ def test_from_cache_expired_and_dns_fail(self): entry.priority = 0 entry.weight = 0 - cache = {service_name: [entry]} + cache = {service_name: [cast(Server, entry)]} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) - servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) + servers: List[Server] + servers = yield defer.ensureDeferred( + resolver.resolve_service(service_name) + ) # type: ignore[assignment] dns_client_mock.lookupService.assert_called_once_with(service_name) @@ -92,7 +98,7 @@ def test_from_cache_expired_and_dns_fail(self): self.assertEqual(servers, cache[service_name]) @defer.inlineCallbacks - def test_from_cache(self): + def test_from_cache(self) -> Generator["Deferred[object]", object, None]: clock = MockClock() dns_client_mock = Mock(spec_set=["lookupService"]) @@ -105,12 +111,15 @@ def test_from_cache(self): entry.priority = 0 entry.weight = 0 - cache = {service_name: [entry]} + cache = {service_name: [cast(Server, entry)]} resolver = SrvResolver( dns_client=dns_client_mock, cache=cache, get_time=clock.time ) - servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) + servers: List[Server] + servers = yield defer.ensureDeferred( + resolver.resolve_service(service_name) + ) # type: ignore[assignment] self.assertFalse(dns_client_mock.lookupService.called) @@ -118,45 +127,48 @@ def test_from_cache(self): self.assertEqual(servers, cache[service_name]) @defer.inlineCallbacks - def test_empty_cache(self): + def test_empty_cache(self) -> Generator["Deferred[object]", object, None]: dns_client_mock = Mock() dns_client_mock.lookupService.return_value = defer.fail(error.DNSServerError()) service_name = 
b"test_service.example.com" - cache = {} + cache: Dict[bytes, List[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) with self.assertRaises(error.DNSServerError): yield defer.ensureDeferred(resolver.resolve_service(service_name)) @defer.inlineCallbacks - def test_name_error(self): + def test_name_error(self) -> Generator["Deferred[object]", object, None]: dns_client_mock = Mock() dns_client_mock.lookupService.return_value = defer.fail(error.DNSNameError()) service_name = b"test_service.example.com" - cache = {} + cache: Dict[bytes, List[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) - servers = yield defer.ensureDeferred(resolver.resolve_service(service_name)) + servers: List[Server] + servers = yield defer.ensureDeferred( + resolver.resolve_service(service_name) + ) # type: ignore[assignment] self.assertEqual(len(servers), 0) self.assertEqual(len(cache), 0) - def test_disabled_service(self): + def test_disabled_service(self) -> None: """ test the behaviour when there is a single record which is ".". """ service_name = b"test_service.example.com" - lookup_deferred = Deferred() + lookup_deferred: "Deferred[Tuple[List[dns.RRHeader], None, None]]" = Deferred() dns_client_mock = Mock() dns_client_mock.lookupService.return_value = lookup_deferred - cache = {} + cache: Dict[bytes, List[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) # Old versions of Twisted don't have an ensureDeferred in failureResultOf. @@ -173,16 +185,16 @@ def test_disabled_service(self): self.failureResultOf(resolve_d, ConnectError) - def test_non_srv_answer(self): + def test_non_srv_answer(self) -> None: """ test the behaviour when the dns server gives us a spurious non-SRV response """ service_name = b"test_service.example.com" - lookup_deferred = Deferred() + lookup_deferred: "Deferred[Tuple[List[dns.RRHeader], None, None]]" = Deferred() dns_client_mock = Mock() dns_client_mock.lookupService.return_value = lookup_deferred - cache = {} + cache: Dict[bytes, List[Server]] = {} resolver = SrvResolver(dns_client=dns_client_mock, cache=cache) # Old versions of Twisted don't have an ensureDeferred in successResultOf. diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 5071f835745e..36472e57a839 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -556,6 +556,6 @@ def _get_stack_frame_method_name(frame_info: inspect.FrameInfo) -> str: return method_name -def _hash_stack(stack: List[inspect.FrameInfo]): +def _hash_stack(stack: List[inspect.FrameInfo]) -> Tuple[str, ...]: """Turns a stack into a hashable value that can be put into a set.""" return tuple(_format_stack_frame(frame) for frame in stack) diff --git a/tests/http/test_additional_resource.py b/tests/http/test_additional_resource.py index 391196425c38..ec6aacf235ae 100644 --- a/tests/http/test_additional_resource.py +++ b/tests/http/test_additional_resource.py @@ -11,28 +11,34 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Any +from twisted.web.server import Request from synapse.http.additional_resource import AdditionalResource from synapse.http.server import respond_with_json +from synapse.http.site import SynapseRequest +from synapse.types import JsonDict from tests.server import FakeSite, make_request from tests.unittest import HomeserverTestCase class _AsyncTestCustomEndpoint: - def __init__(self, config, module_api): + def __init__(self, config: JsonDict, module_api: Any) -> None: pass - async def handle_request(self, request): + async def handle_request(self, request: Request) -> None: + assert isinstance(request, SynapseRequest) respond_with_json(request, 200, {"some_key": "some_value_async"}) class _SyncTestCustomEndpoint: - def __init__(self, config, module_api): + def __init__(self, config: JsonDict, module_api: Any) -> None: pass - async def handle_request(self, request): + async def handle_request(self, request: Request) -> None: + assert isinstance(request, SynapseRequest) respond_with_json(request, 200, {"some_key": "some_value_sync"}) @@ -41,7 +47,7 @@ class AdditionalResourceTests(HomeserverTestCase): and async handlers. """ - def test_async(self): + def test_async(self) -> None: handler = _AsyncTestCustomEndpoint({}, None).handle_request resource = AdditionalResource(self.hs, handler) @@ -52,7 +58,7 @@ def test_async(self): self.assertEqual(channel.code, 200) self.assertEqual(channel.json_body, {"some_key": "some_value_async"}) - def test_sync(self): + def test_sync(self) -> None: handler = _SyncTestCustomEndpoint({}, None).handle_request resource = AdditionalResource(self.hs, handler) diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 7e2f2a01cc07..9cfe1ad0de0a 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -13,10 +13,12 @@ # limitations under the License. from io import BytesIO +from typing import Tuple, Union from unittest.mock import Mock from netaddr import IPSet +from twisted.internet.defer import Deferred from twisted.internet.error import DNSLookupError from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol @@ -28,6 +30,7 @@ BlacklistingAgentWrapper, BlacklistingReactorWrapper, BodyExceededMaxSize, + _DiscardBodyWithMaxSizeProtocol, read_body_with_max_size, ) @@ -36,7 +39,9 @@ class ReadBodyWithMaxSizeTests(TestCase): - def _build_response(self, length=UNKNOWN_LENGTH): + def _build_response( + self, length: Union[int, str] = UNKNOWN_LENGTH + ) -> Tuple[BytesIO, "Deferred[int]", _DiscardBodyWithMaxSizeProtocol]: """Start reading the body, returns the response, result and proto""" response = Mock(length=length) result = BytesIO() @@ -48,23 +53,27 @@ def _build_response(self, length=UNKNOWN_LENGTH): return result, deferred, protocol - def _assert_error(self, deferred, protocol): + def _assert_error( + self, deferred: "Deferred[int]", protocol: _DiscardBodyWithMaxSizeProtocol + ) -> None: """Ensure that the expected error is received.""" - self.assertIsInstance(deferred.result, Failure) + assert isinstance(deferred.result, Failure) self.assertIsInstance(deferred.result.value, BodyExceededMaxSize) - protocol.transport.abortConnection.assert_called_once() + assert protocol.transport is not None + # type-ignore: presumably abortConnection has been replaced with a Mock. 
+ protocol.transport.abortConnection.assert_called_once() # type: ignore[attr-defined] - def _cleanup_error(self, deferred): + def _cleanup_error(self, deferred: "Deferred[int]") -> None: """Ensure that the error in the Deferred is handled gracefully.""" called = [False] - def errback(f): + def errback(f: Failure) -> None: called[0] = True deferred.addErrback(errback) self.assertTrue(called[0]) - def test_no_error(self): + def test_no_error(self) -> None: """A response that is NOT too large.""" result, deferred, protocol = self._build_response() @@ -76,7 +85,7 @@ def test_no_error(self): self.assertEqual(result.getvalue(), b"12345") self.assertEqual(deferred.result, 5) - def test_too_large(self): + def test_too_large(self) -> None: """A response which is too large raises an exception.""" result, deferred, protocol = self._build_response() @@ -87,7 +96,7 @@ def test_too_large(self): self._assert_error(deferred, protocol) self._cleanup_error(deferred) - def test_multiple_packets(self): + def test_multiple_packets(self) -> None: """Data should be accumulated through mutliple packets.""" result, deferred, protocol = self._build_response() @@ -100,7 +109,7 @@ def test_multiple_packets(self): self.assertEqual(result.getvalue(), b"1234") self.assertEqual(deferred.result, 4) - def test_additional_data(self): + def test_additional_data(self) -> None: """A connection can receive data after being closed.""" result, deferred, protocol = self._build_response() @@ -115,7 +124,7 @@ def test_additional_data(self): self._assert_error(deferred, protocol) self._cleanup_error(deferred) - def test_content_length(self): + def test_content_length(self) -> None: """The body shouldn't be read (at all) if the Content-Length header is too large.""" result, deferred, protocol = self._build_response(length=10) @@ -132,7 +141,7 @@ def test_content_length(self): class BlacklistingAgentTest(TestCase): - def setUp(self): + def setUp(self) -> None: self.reactor, self.clock = get_clock() self.safe_domain, self.safe_ip = b"safe.test", b"1.2.3.4" @@ -151,7 +160,7 @@ def setUp(self): self.ip_whitelist = IPSet([self.allowed_ip.decode()]) self.ip_blacklist = IPSet(["5.0.0.0/8"]) - def test_reactor(self): + def test_reactor(self) -> None: """Apply the blacklisting reactor and ensure it properly blocks connections to particular domains and IPs.""" agent = Agent( BlacklistingReactorWrapper( @@ -197,7 +206,7 @@ def test_reactor(self): response = self.successResultOf(d) self.assertEqual(response.code, 200) - def test_agent(self): + def test_agent(self) -> None: """Apply the blacklisting agent and ensure it properly blocks connections to particular IPs.""" agent = BlacklistingAgentWrapper( Agent(self.reactor), diff --git a/tests/http/test_endpoint.py b/tests/http/test_endpoint.py index a801f002a004..8c18e5688177 100644 --- a/tests/http/test_endpoint.py +++ b/tests/http/test_endpoint.py @@ -17,7 +17,7 @@ class ServerNameTestCase(unittest.TestCase): - def test_parse_server_name(self): + def test_parse_server_name(self) -> None: test_data = { "localhost": ("localhost", None), "my-example.com:1234": ("my-example.com", 1234), @@ -32,7 +32,7 @@ def test_parse_server_name(self): for i, o in test_data.items(): self.assertEqual(parse_server_name(i), o) - def test_validate_bad_server_names(self): + def test_validate_bad_server_names(self) -> None: test_data = [ "", # empty "localhost:http", # non-numeric port diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index be9eaf34e8e7..fdd22a8e9437 
100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -11,16 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from typing import Generator from unittest.mock import Mock from netaddr import IPSet from parameterized import parameterized from twisted.internet import defer -from twisted.internet.defer import TimeoutError +from twisted.internet.defer import Deferred, TimeoutError from twisted.internet.error import ConnectingCancelledError, DNSLookupError -from twisted.test.proto_helpers import StringTransport +from twisted.test.proto_helpers import MemoryReactor, StringTransport from twisted.web.client import ResponseNeverReceived from twisted.web.http import HTTPChannel @@ -30,34 +30,43 @@ MatrixFederationHttpClient, MatrixFederationRequest, ) -from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context +from synapse.logging.context import ( + SENTINEL_CONTEXT, + LoggingContext, + LoggingContextOrSentinel, + current_context, +) +from synapse.server import HomeServer +from synapse.util import Clock from tests.server import FakeTransport from tests.unittest import HomeserverTestCase -def check_logcontext(context): +def check_logcontext(context: LoggingContextOrSentinel) -> None: current = current_context() if current is not context: raise AssertionError("Expected logcontext %s but was %s" % (context, current)) class FederationClientTests(HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver(reactor=reactor, clock=clock) return hs - def prepare(self, reactor, clock, homeserver): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: self.cl = MatrixFederationHttpClient(self.hs, None) self.reactor.lookups["testserv"] = "1.2.3.4" - def test_client_get(self): + def test_client_get(self) -> None: """ happy-path test of a GET request """ @defer.inlineCallbacks - def do_request(): + def do_request() -> Generator["Deferred[object]", object, object]: with LoggingContext("one") as context: fetch_d = defer.ensureDeferred( self.cl.get_json("testserv:8008", "foo/bar") @@ -119,7 +128,7 @@ def do_request(): # check the response is as expected self.assertEqual(res, {"a": 1}) - def test_dns_error(self): + def test_dns_error(self) -> None: """ If the DNS lookup returns an error, it will bubble up. """ @@ -132,7 +141,7 @@ def test_dns_error(self): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, DNSLookupError) - def test_client_connection_refused(self): + def test_client_connection_refused(self) -> None: d = defer.ensureDeferred( self.cl.get_json("testserv:8008", "foo/bar", timeout=10000) ) @@ -156,7 +165,7 @@ def test_client_connection_refused(self): self.assertIsInstance(f.value, RequestSendFailed) self.assertIs(f.value.inner_exception, e) - def test_client_never_connect(self): + def test_client_never_connect(self) -> None: """ If the HTTP request is not connected and is timed out, it'll give a ConnectingCancelledError or TimeoutError. 
@@ -188,7 +197,7 @@ def test_client_never_connect(self): f.value.inner_exception, (ConnectingCancelledError, TimeoutError) ) - def test_client_connect_no_response(self): + def test_client_connect_no_response(self) -> None: """ If the HTTP request is connected, but gets no response before being timed out, it'll give a ResponseNeverReceived. @@ -222,7 +231,7 @@ def test_client_connect_no_response(self): self.assertIsInstance(f.value, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, ResponseNeverReceived) - def test_client_ip_range_blacklist(self): + def test_client_ip_range_blacklist(self) -> None: """Ensure that Synapse does not try to connect to blacklisted IPs""" # Set up the ip_range blacklist @@ -292,7 +301,7 @@ def test_client_ip_range_blacklist(self): f = self.failureResultOf(d, RequestSendFailed) self.assertIsInstance(f.value.inner_exception, ConnectingCancelledError) - def test_client_gets_headers(self): + def test_client_gets_headers(self) -> None: """ Once the client gets the headers, _request returns successfully. """ @@ -319,7 +328,7 @@ def test_client_gets_headers(self): self.assertEqual(r.code, 200) @parameterized.expand(["get_json", "post_json", "delete_json", "put_json"]) - def test_timeout_reading_body(self, method_name: str): + def test_timeout_reading_body(self, method_name: str) -> None: """ If the HTTP request is connected, but gets no response before being timed out, it'll give a RequestSendFailed with can_retry. @@ -351,7 +360,7 @@ def test_timeout_reading_body(self, method_name: str): self.assertTrue(f.value.can_retry) self.assertIsInstance(f.value.inner_exception, defer.TimeoutError) - def test_client_requires_trailing_slashes(self): + def test_client_requires_trailing_slashes(self) -> None: """ If a connection is made to a client but the client rejects it due to requiring a trailing slash. We need to retry the request with a @@ -405,7 +414,7 @@ def test_client_requires_trailing_slashes(self): r = self.successResultOf(d) self.assertEqual(r, {}) - def test_client_does_not_retry_on_400_plus(self): + def test_client_does_not_retry_on_400_plus(self) -> None: """ Another test for trailing slashes but now test that we don't retry on trailing slashes on a non-400/M_UNRECOGNIZED response. @@ -450,7 +459,7 @@ def test_client_does_not_retry_on_400_plus(self): # We should get a 404 failure response self.failureResultOf(d) - def test_client_sends_body(self): + def test_client_sends_body(self) -> None: defer.ensureDeferred( self.cl.post_json( "testserv:8008", "foo/bar", timeout=10000, data={"a": "b"} @@ -474,7 +483,7 @@ def test_client_sends_body(self): content = request.content.read() self.assertEqual(content, b'{"a":"b"}') - def test_closes_connection(self): + def test_closes_connection(self) -> None: """Check that the client closes unused HTTP connections""" d = defer.ensureDeferred(self.cl.get_json("testserv:8008", "foo/bar")) @@ -514,7 +523,7 @@ def test_closes_connection(self): self.assertTrue(conn.disconnecting) @parameterized.expand([(b"",), (b"foo",), (b'{"a": Infinity}',)]) - def test_json_error(self, return_value): + def test_json_error(self, return_value: bytes) -> None: """ Test what happens if invalid JSON is returned from the remote endpoint. """ @@ -560,7 +569,7 @@ def test_json_error(self, return_value): f = self.failureResultOf(test_d) self.assertIsInstance(f.value, RequestSendFailed) - def test_too_big(self): + def test_too_big(self) -> None: """ Test what happens if a huge response is returned from the remote endpoint. 
""" diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 2db77c6a7345..a817940730fd 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -14,7 +14,7 @@ import base64 import logging import os -from typing import Iterable, Optional +from typing import List, Optional from unittest.mock import patch import treq @@ -22,7 +22,11 @@ from parameterized import parameterized from twisted.internet import interfaces # noqa: F401 -from twisted.internet.endpoints import HostnameEndpoint, _WrapperEndpoint +from twisted.internet.endpoints import ( + HostnameEndpoint, + _WrapperEndpoint, + _WrappingProtocol, +) from twisted.internet.interfaces import IProtocol, IProtocolFactory from twisted.internet.protocol import Factory from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol @@ -32,7 +36,11 @@ from synapse.http.connectproxyclient import ProxyCredentials from synapse.http.proxyagent import ProxyAgent, parse_proxy -from tests.http import TestServerTLSConnectionFactory, get_test_https_policy +from tests.http import ( + TestServerTLSConnectionFactory, + dummy_address, + get_test_https_policy, +) from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase @@ -183,7 +191,7 @@ def test_parse_proxy( expected_hostname: bytes, expected_port: int, expected_credentials: Optional[bytes], - ): + ) -> None: """ Tests that a given proxy URL will be broken into the components. Args: @@ -209,7 +217,7 @@ def test_parse_proxy( class MatrixFederationAgentTests(TestCase): - def setUp(self): + def setUp(self) -> None: self.reactor = ThreadedMemoryReactorClock() def _make_connection( @@ -218,7 +226,7 @@ def _make_connection( server_factory: IProtocolFactory, ssl: bool = False, expected_sni: Optional[bytes] = None, - tls_sanlist: Optional[Iterable[bytes]] = None, + tls_sanlist: Optional[List[bytes]] = None, ) -> IProtocol: """Builds a test server, and completes the outgoing client connection @@ -244,7 +252,8 @@ def _make_connection( if ssl: server_factory = _wrap_server_factory_for_tls(server_factory, tls_sanlist) - server_protocol = server_factory.buildProtocol(None) + server_protocol = server_factory.buildProtocol(dummy_address) + assert server_protocol is not None # now, tell the client protocol factory to build the client protocol, # and wire the output of said protocol up to the server via @@ -252,7 +261,8 @@ def _make_connection( # # Normally this would be done by the TCP socket code in Twisted, but we are # stubbing that out here. - client_protocol = client_factory.buildProtocol(None) + client_protocol = client_factory.buildProtocol(dummy_address) + assert client_protocol is not None client_protocol.makeConnection( FakeTransport(server_protocol, self.reactor, client_protocol) ) @@ -263,6 +273,7 @@ def _make_connection( ) if ssl: + assert isinstance(server_protocol, TLSMemoryBIOProtocol) http_protocol = server_protocol.wrappedProtocol tls_connection = server_protocol._tlsConnection else: @@ -288,7 +299,7 @@ def _test_request_direct_connection( scheme: bytes, hostname: bytes, path: bytes, - ): + ) -> None: """Runs a test case for a direct connection not going through a proxy. 
Args: @@ -319,6 +330,7 @@ def _test_request_direct_connection( ssl=is_https, expected_sni=hostname if is_https else None, ) + assert isinstance(http_server, HTTPChannel) # the FakeTransport is async, so we need to pump the reactor self.reactor.advance(0) @@ -339,34 +351,34 @@ def _test_request_direct_connection( body = self.successResultOf(treq.content(resp)) self.assertEqual(body, b"result") - def test_http_request(self): + def test_http_request(self) -> None: agent = ProxyAgent(self.reactor) self._test_request_direct_connection(agent, b"http", b"test.com", b"") - def test_https_request(self): + def test_https_request(self) -> None: agent = ProxyAgent(self.reactor, contextFactory=get_test_https_policy()) self._test_request_direct_connection(agent, b"https", b"test.com", b"abc") - def test_http_request_use_proxy_empty_environment(self): + def test_http_request_use_proxy_empty_environment(self) -> None: agent = ProxyAgent(self.reactor, use_proxy=True) self._test_request_direct_connection(agent, b"http", b"test.com", b"") @patch.dict(os.environ, {"http_proxy": "proxy.com:8888", "NO_PROXY": "test.com"}) - def test_http_request_via_uppercase_no_proxy(self): + def test_http_request_via_uppercase_no_proxy(self) -> None: agent = ProxyAgent(self.reactor, use_proxy=True) self._test_request_direct_connection(agent, b"http", b"test.com", b"") @patch.dict( os.environ, {"http_proxy": "proxy.com:8888", "no_proxy": "test.com,unused.com"} ) - def test_http_request_via_no_proxy(self): + def test_http_request_via_no_proxy(self) -> None: agent = ProxyAgent(self.reactor, use_proxy=True) self._test_request_direct_connection(agent, b"http", b"test.com", b"") @patch.dict( os.environ, {"https_proxy": "proxy.com", "no_proxy": "test.com,unused.com"} ) - def test_https_request_via_no_proxy(self): + def test_https_request_via_no_proxy(self) -> None: agent = ProxyAgent( self.reactor, contextFactory=get_test_https_policy(), @@ -375,12 +387,12 @@ def test_https_request_via_no_proxy(self): self._test_request_direct_connection(agent, b"https", b"test.com", b"abc") @patch.dict(os.environ, {"http_proxy": "proxy.com:8888", "no_proxy": "*"}) - def test_http_request_via_no_proxy_star(self): + def test_http_request_via_no_proxy_star(self) -> None: agent = ProxyAgent(self.reactor, use_proxy=True) self._test_request_direct_connection(agent, b"http", b"test.com", b"") @patch.dict(os.environ, {"https_proxy": "proxy.com", "no_proxy": "*"}) - def test_https_request_via_no_proxy_star(self): + def test_https_request_via_no_proxy_star(self) -> None: agent = ProxyAgent( self.reactor, contextFactory=get_test_https_policy(), @@ -389,7 +401,7 @@ def test_https_request_via_no_proxy_star(self): self._test_request_direct_connection(agent, b"https", b"test.com", b"abc") @patch.dict(os.environ, {"http_proxy": "proxy.com:8888", "no_proxy": "unused.com"}) - def test_http_request_via_proxy(self): + def test_http_request_via_proxy(self) -> None: """ Tests that requests can be made through a proxy. """ @@ -401,7 +413,7 @@ def test_http_request_via_proxy(self): os.environ, {"http_proxy": "bob:pinkponies@proxy.com:8888", "no_proxy": "unused.com"}, ) - def test_http_request_via_proxy_with_auth(self): + def test_http_request_via_proxy_with_auth(self) -> None: """ Tests that authenticated requests can be made through a proxy. 
""" @@ -412,7 +424,7 @@ def test_http_request_via_proxy_with_auth(self): @patch.dict( os.environ, {"http_proxy": "https://proxy.com:8888", "no_proxy": "unused.com"} ) - def test_http_request_via_https_proxy(self): + def test_http_request_via_https_proxy(self) -> None: self._do_http_request_via_proxy( expect_proxy_ssl=True, expected_auth_credentials=None ) @@ -424,13 +436,13 @@ def test_http_request_via_https_proxy(self): "no_proxy": "unused.com", }, ) - def test_http_request_via_https_proxy_with_auth(self): + def test_http_request_via_https_proxy_with_auth(self) -> None: self._do_http_request_via_proxy( expect_proxy_ssl=True, expected_auth_credentials=b"bob:pinkponies" ) @patch.dict(os.environ, {"https_proxy": "proxy.com", "no_proxy": "unused.com"}) - def test_https_request_via_proxy(self): + def test_https_request_via_proxy(self) -> None: """Tests that TLS-encrypted requests can be made through a proxy""" self._do_https_request_via_proxy( expect_proxy_ssl=False, expected_auth_credentials=None @@ -440,7 +452,7 @@ def test_https_request_via_proxy(self): os.environ, {"https_proxy": "bob:pinkponies@proxy.com", "no_proxy": "unused.com"}, ) - def test_https_request_via_proxy_with_auth(self): + def test_https_request_via_proxy_with_auth(self) -> None: """Tests that authenticated, TLS-encrypted requests can be made through a proxy""" self._do_https_request_via_proxy( expect_proxy_ssl=False, expected_auth_credentials=b"bob:pinkponies" @@ -449,7 +461,7 @@ def test_https_request_via_proxy_with_auth(self): @patch.dict( os.environ, {"https_proxy": "https://proxy.com", "no_proxy": "unused.com"} ) - def test_https_request_via_https_proxy(self): + def test_https_request_via_https_proxy(self) -> None: """Tests that TLS-encrypted requests can be made through a proxy""" self._do_https_request_via_proxy( expect_proxy_ssl=True, expected_auth_credentials=None @@ -459,7 +471,7 @@ def test_https_request_via_https_proxy(self): os.environ, {"https_proxy": "https://bob:pinkponies@proxy.com", "no_proxy": "unused.com"}, ) - def test_https_request_via_https_proxy_with_auth(self): + def test_https_request_via_https_proxy_with_auth(self) -> None: """Tests that authenticated, TLS-encrypted requests can be made through a proxy""" self._do_https_request_via_proxy( expect_proxy_ssl=True, expected_auth_credentials=b"bob:pinkponies" @@ -469,7 +481,7 @@ def _do_http_request_via_proxy( self, expect_proxy_ssl: bool = False, expected_auth_credentials: Optional[bytes] = None, - ): + ) -> None: """Send a http request via an agent and check that it is correctly received at the proxy. The proxy can use either http or https. Args: @@ -501,6 +513,7 @@ def _do_http_request_via_proxy( tls_sanlist=[b"DNS:proxy.com"] if expect_proxy_ssl else None, expected_sni=b"proxy.com" if expect_proxy_ssl else None, ) + assert isinstance(http_server, HTTPChannel) # the FakeTransport is async, so we need to pump the reactor self.reactor.advance(0) @@ -542,7 +555,7 @@ def _do_https_request_via_proxy( self, expect_proxy_ssl: bool = False, expected_auth_credentials: Optional[bytes] = None, - ): + ) -> None: """Send a https request via an agent and check that it is correctly received at the proxy and client. The proxy can use either http or https. Args: @@ -606,10 +619,12 @@ def _do_https_request_via_proxy( # now we make another test server to act as the upstream HTTP server. 
server_ssl_protocol = _wrap_server_factory_for_tls( _get_test_protocol_factory() - ).buildProtocol(None) + ).buildProtocol(dummy_address) + assert isinstance(server_ssl_protocol, TLSMemoryBIOProtocol) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. proxy_server_transport = proxy_server.transport + assert proxy_server_transport is not None server_ssl_protocol.makeConnection(proxy_server_transport) # ... and replace the protocol on the proxy's transport with the @@ -644,6 +659,7 @@ def _do_https_request_via_proxy( # now there should be a pending request http_server = server_ssl_protocol.wrappedProtocol + assert isinstance(http_server, HTTPChannel) self.assertEqual(len(http_server.requests), 1) request = http_server.requests[0] @@ -667,7 +683,7 @@ def _do_https_request_via_proxy( self.assertEqual(body, b"result") @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) - def test_http_request_via_proxy_with_blacklist(self): + def test_http_request_via_proxy_with_blacklist(self) -> None: # The blacklist includes the configured proxy IP. agent = ProxyAgent( BlacklistingReactorWrapper( @@ -691,6 +707,7 @@ def test_http_request_via_proxy_with_blacklist(self): http_server = self._make_connection( client_factory, _get_test_protocol_factory() ) + assert isinstance(http_server, HTTPChannel) # the FakeTransport is async, so we need to pump the reactor self.reactor.advance(0) @@ -712,7 +729,7 @@ def test_http_request_via_proxy_with_blacklist(self): self.assertEqual(body, b"result") @patch.dict(os.environ, {"HTTPS_PROXY": "proxy.com"}) - def test_https_request_via_uppercase_proxy_with_blacklist(self): + def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None: # The blacklist includes the configured proxy IP. agent = ProxyAgent( BlacklistingReactorWrapper( @@ -737,11 +754,15 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self): proxy_server = self._make_connection( client_factory, _get_test_protocol_factory() ) + assert isinstance(proxy_server, HTTPChannel) # fish the transports back out so that we can do the old switcheroo s2c_transport = proxy_server.transport + assert isinstance(s2c_transport, FakeTransport) client_protocol = s2c_transport.other + assert isinstance(client_protocol, _WrappingProtocol) c2s_transport = client_protocol.transport + assert isinstance(c2s_transport, FakeTransport) # the FakeTransport is async, so we need to pump the reactor self.reactor.advance(0) @@ -762,8 +783,10 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self): # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory()) - ssl_protocol = ssl_factory.buildProtocol(None) + ssl_protocol = ssl_factory.buildProtocol(dummy_address) + assert isinstance(ssl_protocol, TLSMemoryBIOProtocol) http_server = ssl_protocol.wrappedProtocol + assert isinstance(http_server, HTTPChannel) ssl_protocol.makeConnection( FakeTransport(client_protocol, self.reactor, ssl_protocol) @@ -797,28 +820,28 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self): self.assertEqual(body, b"result") @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) - def test_proxy_with_no_scheme(self): + def test_proxy_with_no_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - self.assertIsInstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) + assert isinstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) 
self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) @patch.dict(os.environ, {"http_proxy": "socks://proxy.com:8888"}) - def test_proxy_with_unsupported_scheme(self): + def test_proxy_with_unsupported_scheme(self) -> None: with self.assertRaises(ValueError): ProxyAgent(self.reactor, use_proxy=True) @patch.dict(os.environ, {"http_proxy": "http://proxy.com:8888"}) - def test_proxy_with_http_scheme(self): + def test_proxy_with_http_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - self.assertIsInstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) + assert isinstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) @patch.dict(os.environ, {"http_proxy": "https://proxy.com:8888"}) - def test_proxy_with_https_scheme(self): + def test_proxy_with_https_scheme(self) -> None: https_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - self.assertIsInstance(https_proxy_agent.http_proxy_endpoint, _WrapperEndpoint) + assert isinstance(https_proxy_agent.http_proxy_endpoint, _WrapperEndpoint) self.assertEqual( https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._hostStr, "proxy.com" ) @@ -828,7 +851,7 @@ def test_proxy_with_https_scheme(self): def _wrap_server_factory_for_tls( - factory: IProtocolFactory, sanlist: Iterable[bytes] = None + factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None ) -> IProtocolFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory @@ -865,6 +888,6 @@ def _get_test_protocol_factory() -> IProtocolFactory: return server_factory -def _log_request(request: str): +def _log_request(request: str) -> None: """Implements Factory.log, which is expected by Request.finish""" logger.info(f"Completed request {request}") diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index 46166292fed9..c8d215b6dcfe 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -14,7 +14,7 @@ import json from http import HTTPStatus from io import BytesIO -from typing import Tuple +from typing import Tuple, Union from unittest.mock import Mock from synapse.api.errors import Codes, SynapseError @@ -33,7 +33,7 @@ from tests.http.server._base import test_disconnect -def make_request(content): +def make_request(content: Union[bytes, JsonDict]) -> Mock: """Make an object that acts enough like a request.""" request = Mock(spec=["method", "uri", "content"]) @@ -47,7 +47,7 @@ def make_request(content): class TestServletUtils(unittest.TestCase): - def test_parse_json_value(self): + def test_parse_json_value(self) -> None: """Basic tests for parse_json_value_from_request.""" # Test round-tripping. obj = {"foo": 1} @@ -78,7 +78,7 @@ def test_parse_json_value(self): with self.assertRaises(SynapseError): parse_json_value_from_request(make_request(b'{"foo": Infinity}')) - def test_parse_json_object(self): + def test_parse_json_object(self) -> None: """Basic tests for parse_json_object_from_request.""" # Test empty. 
result = parse_json_object_from_request( diff --git a/tests/http/test_simple_client.py b/tests/http/test_simple_client.py index c85a3665c127..010601da4b93 100644 --- a/tests/http/test_simple_client.py +++ b/tests/http/test_simple_client.py @@ -17,22 +17,24 @@ from twisted.internet import defer from twisted.internet.error import DNSLookupError +from twisted.test.proto_helpers import MemoryReactor from synapse.http import RequestTimedOutError from synapse.http.client import SimpleHttpClient from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase class SimpleHttpClientTests(HomeserverTestCase): - def prepare(self, reactor, clock, hs: "HomeServer"): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: "HomeServer") -> None: # Add a DNS entry for a test server self.reactor.lookups["testserv"] = "1.2.3.4" self.cl = hs.get_simple_http_client() - def test_dns_error(self): + def test_dns_error(self) -> None: """ If the DNS lookup returns an error, it will bubble up. """ @@ -42,7 +44,7 @@ def test_dns_error(self): f = self.failureResultOf(d) self.assertIsInstance(f.value, DNSLookupError) - def test_client_connection_refused(self): + def test_client_connection_refused(self) -> None: d = defer.ensureDeferred(self.cl.get_json("http://testserv:8008/foo/bar")) self.pump() @@ -63,7 +65,7 @@ def test_client_connection_refused(self): self.assertIs(f.value, e) - def test_client_never_connect(self): + def test_client_never_connect(self) -> None: """ If the HTTP request is not connected and is timed out, it'll give a ConnectingCancelledError or TimeoutError. @@ -90,7 +92,7 @@ def test_client_never_connect(self): self.assertIsInstance(f.value, RequestTimedOutError) - def test_client_connect_no_response(self): + def test_client_connect_no_response(self) -> None: """ If the HTTP request is connected, but gets no response before being timed out, it'll give a ResponseNeverReceived. @@ -121,7 +123,7 @@ def test_client_connect_no_response(self): self.assertIsInstance(f.value, RequestTimedOutError) - def test_client_ip_range_blacklist(self): + def test_client_ip_range_blacklist(self) -> None: """Ensure that Synapse does not try to connect to blacklisted IPs""" # Add some DNS entries we'll blacklist diff --git a/tests/http/test_site.py b/tests/http/test_site.py index b2dbf76d33b1..9a78fede9294 100644 --- a/tests/http/test_site.py +++ b/tests/http/test_site.py @@ -13,18 +13,20 @@ # limitations under the License. 
from twisted.internet.address import IPv6Address -from twisted.test.proto_helpers import StringTransport +from twisted.test.proto_helpers import MemoryReactor, StringTransport from synapse.app.homeserver import SynapseHomeServer +from synapse.server import HomeServer +from synapse.util import Clock from tests.unittest import HomeserverTestCase class SynapseRequestTestCase(HomeserverTestCase): - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return self.setup_test_homeserver(homeserver_to_use=SynapseHomeServer) - def test_large_request(self): + def test_large_request(self) -> None: """overlarge HTTP requests should be rejected""" self.hs.start_listening() diff --git a/tests/server.py b/tests/server.py index b1730fcc8dd5..237bcad8ba6f 100644 --- a/tests/server.py +++ b/tests/server.py @@ -70,7 +70,7 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.engines import PostgresEngine, create_engine -from synapse.types import JsonDict +from synapse.types import ISynapseReactor, JsonDict from synapse.util import Clock from tests.utils import ( @@ -401,7 +401,9 @@ def make_request( return channel -@implementer(IReactorPluggableNameResolver) +# ISynapseReactor implies IReactorPluggableNameResolver, but explicitly +# marking this as an implementer of the latter seems to keep mypy-zope happier. +@implementer(IReactorPluggableNameResolver, ISynapseReactor) class ThreadedMemoryReactorClock(MemoryReactorClock): """ A MemoryReactorClock that supports callFromThread. From 5b55c32d610b2baec8622f0418519b130ab4fa30 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Feb 2023 06:56:09 -0500 Subject: [PATCH 135/344] Add tests for using _flatten_dict with an event. (#15002) --- changelog.d/15002.misc | 1 + synapse/push/bulk_push_rule_evaluator.py | 13 ++--- tests/push/test_push_rule_evaluator.py | 63 +++++++++++++++++++++++- 3 files changed, 68 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15002.misc diff --git a/changelog.d/15002.misc b/changelog.d/15002.misc new file mode 100644 index 000000000000..68ac8335fc4c --- /dev/null +++ b/changelog.d/15002.misc @@ -0,0 +1 @@ +Add tests for `_flatten_dict`. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index f73dceb128bc..d9c0a98f44ee 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -36,7 +36,7 @@ Membership, RelationTypes, ) -from synapse.api.room_versions import PushRuleRoomFlag, RoomVersion +from synapse.api.room_versions import PushRuleRoomFlag from synapse.event_auth import auth_types_for_event, get_user_power_level from synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext @@ -405,7 +405,7 @@ async def _action_for_event_by_user( room_mention = mentions.get("room") is True evaluator = PushRuleEvaluator( - _flatten_dict(event, room_version=event.room_version), + _flatten_dict(event), has_mentions, user_mentions, room_mention, @@ -491,7 +491,6 @@ async def _action_for_event_by_user( def _flatten_dict( d: Union[EventBase, Mapping[str, Any]], - room_version: Optional[RoomVersion] = None, prefix: Optional[List[str]] = None, result: Optional[Dict[str, str]] = None, ) -> Dict[str, str]: @@ -511,7 +510,6 @@ def _flatten_dict( Args: d: The event or content to continue flattening. - room_version: The room version object. prefix: The key prefix (from outer dictionaries). 
result: The result to mutate. @@ -531,14 +529,13 @@ def _flatten_dict( # `room_version` should only ever be set when looking at the top level of an event if ( - room_version is not None - and PushRuleRoomFlag.EXTENSIBLE_EVENTS in room_version.msc3931_push_features - and isinstance(d, EventBase) + isinstance(d, EventBase) + and PushRuleRoomFlag.EXTENSIBLE_EVENTS in d.room_version.msc3931_push_features ): # Room supports extensible events: replace `content.body` with the plain text # representation from `m.markup`, as per MSC1767. markup = d.get("content").get("m.markup") - if room_version.identifier.startswith("org.matrix.msc1767."): + if d.room_version.identifier.startswith("org.matrix.msc1767."): markup = d.get("content").get("org.matrix.msc1767.markup") if markup is not None and isinstance(markup, list): text = "" diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 7c430c4ecbd8..da3342387121 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -22,7 +22,7 @@ from synapse.api.constants import EventTypes, HistoryVisibility, Membership from synapse.api.room_versions import RoomVersions from synapse.appservice import ApplicationService -from synapse.events import FrozenEvent +from synapse.events import FrozenEvent, make_event_from_dict from synapse.push.bulk_push_rule_evaluator import _flatten_dict from synapse.push.httppusher import tweaks_for_actions from synapse.rest import admin @@ -60,6 +60,67 @@ def test_non_string(self) -> None: } self.assertEqual({"woo": "woo"}, _flatten_dict(input)) + def test_event(self) -> None: + """Events can also be flattened.""" + event = make_event_from_dict( + { + "room_id": "!test:test", + "type": "m.room.message", + "sender": "@alice:test", + "content": { + "msgtype": "m.text", + "body": "Hello world!", + "format": "org.matrix.custom.html", + "formatted_body": "
<h1>Hello world!</h1>
", + }, + }, + room_version=RoomVersions.V8, + ) + expected = { + "content.msgtype": "m.text", + "content.body": "hello world!", + "content.format": "org.matrix.custom.html", + "content.formatted_body": "
<h1>hello world!</h1>
", + "room_id": "!test:test", + "sender": "@alice:test", + "type": "m.room.message", + } + self.assertEqual(expected, _flatten_dict(event)) + + def test_extensible_events(self) -> None: + """Extensible events has compatibility behaviour.""" + event_dict = { + "room_id": "!test:test", + "type": "m.room.message", + "sender": "@alice:test", + "content": { + "org.matrix.msc1767.markup": [ + {"mimetype": "text/plain", "body": "Hello world!"}, + {"mimetype": "text/html", "body": "
<h1>Hello world!</h1>
"}, + ] + }, + } + + # For a current room version, there's no special behavior. + event = make_event_from_dict(event_dict, room_version=RoomVersions.V8) + expected = { + "room_id": "!test:test", + "sender": "@alice:test", + "type": "m.room.message", + } + self.assertEqual(expected, _flatten_dict(event)) + + # For a room version with extensible events, they parse out the text/plain + # to a content.body property. + event = make_event_from_dict(event_dict, room_version=RoomVersions.MSC1767v10) + expected = { + "content.body": "hello world!", + "room_id": "!test:test", + "sender": "@alice:test", + "type": "m.room.message", + } + self.assertEqual(expected, _flatten_dict(event)) + class PushRuleEvaluatorTestCase(unittest.TestCase): def _get_evaluator( From 4dd2b6165c876fda978d47a907aaabaab3c794ab Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Feb 2023 12:03:39 +0000 Subject: [PATCH 136/344] Proper types for tests.test_terms_auth (#15007) * Proper types for tests.test_terms_auth * Changelog --- changelog.d/15007.misc | 1 + mypy.ini | 4 +++- tests/test_terms_auth.py | 19 +++++++++++++------ 3 files changed, 17 insertions(+), 7 deletions(-) create mode 100644 changelog.d/15007.misc diff --git a/changelog.d/15007.misc b/changelog.d/15007.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15007.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 11e683b7042c..0efafb26b6a6 100644 --- a/mypy.ini +++ b/mypy.ini @@ -35,7 +35,6 @@ exclude = (?x) |tests/module_api/test_api.py |tests/rest/media/v1/test_media_storage.py |tests/server.py - |tests/test_terms_auth.py )$ [mypy-synapse.federation.transport.client] @@ -119,6 +118,9 @@ disallow_untyped_defs = True [mypy-tests.test_state] disallow_untyped_defs = True +[mypy-tests.test_terms_auth] +disallow_untyped_defs = True + [mypy-tests.types.*] disallow_untyped_defs = True diff --git a/tests/test_terms_auth.py b/tests/test_terms_auth.py index abd7459a8cb7..52424aa08713 100644 --- a/tests/test_terms_auth.py +++ b/tests/test_terms_auth.py @@ -14,9 +14,12 @@ from unittest.mock import Mock -from twisted.test.proto_helpers import MemoryReactorClock +from twisted.internet.interfaces import IReactorTime +from twisted.test.proto_helpers import MemoryReactor, MemoryReactorClock from synapse.rest.client.register import register_servlets +from synapse.server import HomeServer +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -25,7 +28,7 @@ class TermsTestCase(unittest.HomeserverTestCase): servlets = [register_servlets] - def default_config(self): + def default_config(self) -> JsonDict: config = super().default_config() config.update( { @@ -40,17 +43,21 @@ def default_config(self): ) return config - def prepare(self, reactor, clock, hs): - self.clock = MemoryReactorClock() + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: + # type-ignore: mypy-zope doesn't seem to recognise that MemoryReactorClock + # implements IReactorTime, via inheritance from twisted.internet.testing.Clock + self.clock: IReactorTime = MemoryReactorClock() # type: ignore[assignment] self.hs_clock = Clock(self.clock) self.url = "/_matrix/client/r0/register" self.registration_handler = Mock() self.auth_handler = Mock() self.device_handler = Mock() - def test_ui_auth(self): + def test_ui_auth(self) -> None: # Do a UI auth request - request_data = {"username": "kermit", "password": "monkey"} + request_data: JsonDict = {"username": 
"kermit", "password": "monkey"} channel = self.make_request(b"POST", self.url, request_data) self.assertEqual(channel.code, 401, channel.result) From f630536a9455107b27284cb56a7f5d0a4afe7de2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Feb 2023 13:45:19 +0000 Subject: [PATCH 137/344] 1.77.0rc1 --- CHANGES.md | 60 +++++++++++++++++++++++++++++++++++++++ changelog.d/14823.feature | 1 - changelog.d/14858.misc | 1 - changelog.d/14866.bugfix | 1 - changelog.d/14879.misc | 1 - changelog.d/14880.bugfix | 1 - changelog.d/14886.misc | 1 - changelog.d/14887.misc | 1 - changelog.d/14894.feature | 1 - changelog.d/14904.misc | 1 - changelog.d/14908.misc | 1 - changelog.d/14915.bugfix | 1 - changelog.d/14916.misc | 1 - changelog.d/14920.misc | 1 - changelog.d/14922.misc | 1 - changelog.d/14926.bugfix | 1 - changelog.d/14927.misc | 1 - changelog.d/14935.misc | 1 - changelog.d/14936.misc | 1 - changelog.d/14937.misc | 1 - changelog.d/14938.misc | 1 - changelog.d/14939.misc | 1 - changelog.d/14942.bugfix | 1 - changelog.d/14943.feature | 1 - changelog.d/14944.bugfix | 1 - changelog.d/14945.misc | 1 - changelog.d/14947.bugfix | 1 - changelog.d/14949.misc | 1 - changelog.d/14950.misc | 1 - changelog.d/14952.misc | 1 - changelog.d/14953.misc | 1 - changelog.d/14954.misc | 1 - changelog.d/14956.misc | 1 - changelog.d/14957.feature | 1 - changelog.d/14958.feature | 1 - changelog.d/14960.feature | 1 - changelog.d/14962.feature | 1 - changelog.d/14965.misc | 1 - changelog.d/14968.misc | 1 - changelog.d/14970.misc | 1 - changelog.d/14971.misc | 1 - changelog.d/14976.bugfix | 1 - changelog.d/14979.misc | 1 - changelog.d/14981.misc | 1 - changelog.d/14983.misc | 1 - changelog.d/14984.misc | 1 - changelog.d/14985.misc | 1 - changelog.d/14987.misc | 1 - changelog.d/14988.misc | 1 - changelog.d/14990.misc | 1 - changelog.d/14991.misc | 1 - changelog.d/14992.misc | 1 - changelog.d/14993.misc | 1 - changelog.d/14994.misc | 1 - changelog.d/14995.misc | 1 - changelog.d/14996.misc | 1 - changelog.d/14997.misc | 1 - changelog.d/14998.misc | 1 - changelog.d/14999.misc | 1 - changelog.d/15002.misc | 1 - changelog.d/15007.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 63 files changed, 67 insertions(+), 61 deletions(-) delete mode 100644 changelog.d/14823.feature delete mode 100644 changelog.d/14858.misc delete mode 100644 changelog.d/14866.bugfix delete mode 100644 changelog.d/14879.misc delete mode 100644 changelog.d/14880.bugfix delete mode 100644 changelog.d/14886.misc delete mode 100644 changelog.d/14887.misc delete mode 100644 changelog.d/14894.feature delete mode 100644 changelog.d/14904.misc delete mode 100644 changelog.d/14908.misc delete mode 100644 changelog.d/14915.bugfix delete mode 100644 changelog.d/14916.misc delete mode 100644 changelog.d/14920.misc delete mode 100644 changelog.d/14922.misc delete mode 100644 changelog.d/14926.bugfix delete mode 100644 changelog.d/14927.misc delete mode 100644 changelog.d/14935.misc delete mode 100644 changelog.d/14936.misc delete mode 100644 changelog.d/14937.misc delete mode 100644 changelog.d/14938.misc delete mode 100644 changelog.d/14939.misc delete mode 100644 changelog.d/14942.bugfix delete mode 100644 changelog.d/14943.feature delete mode 100644 changelog.d/14944.bugfix delete mode 100644 changelog.d/14945.misc delete mode 100644 changelog.d/14947.bugfix delete mode 100644 changelog.d/14949.misc delete mode 100644 changelog.d/14950.misc delete mode 100644 changelog.d/14952.misc delete mode 100644 changelog.d/14953.misc delete mode 100644 
changelog.d/14954.misc delete mode 100644 changelog.d/14956.misc delete mode 100644 changelog.d/14957.feature delete mode 100644 changelog.d/14958.feature delete mode 100644 changelog.d/14960.feature delete mode 100644 changelog.d/14962.feature delete mode 100644 changelog.d/14965.misc delete mode 100644 changelog.d/14968.misc delete mode 100644 changelog.d/14970.misc delete mode 100644 changelog.d/14971.misc delete mode 100644 changelog.d/14976.bugfix delete mode 100644 changelog.d/14979.misc delete mode 100644 changelog.d/14981.misc delete mode 100644 changelog.d/14983.misc delete mode 100644 changelog.d/14984.misc delete mode 100644 changelog.d/14985.misc delete mode 100644 changelog.d/14987.misc delete mode 100644 changelog.d/14988.misc delete mode 100644 changelog.d/14990.misc delete mode 100644 changelog.d/14991.misc delete mode 100644 changelog.d/14992.misc delete mode 100644 changelog.d/14993.misc delete mode 100644 changelog.d/14994.misc delete mode 100644 changelog.d/14995.misc delete mode 100644 changelog.d/14996.misc delete mode 100644 changelog.d/14997.misc delete mode 100644 changelog.d/14998.misc delete mode 100644 changelog.d/14999.misc delete mode 100644 changelog.d/15002.misc delete mode 100644 changelog.d/15007.misc diff --git a/CHANGES.md b/CHANGES.md index 5f0c50851f23..1951ea931dca 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,63 @@ +Synapse 1.77.0rc1 (2023-02-07) +============================== + +Features +-------- + +- Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958)) +- Adds profile information, devices and connections to the user data export via command line. ([\#14894](https://github.com/matrix-org/synapse/issues/14894)) +- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960)) +- Improve performance when joining or sending an event large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. ([\#14866](https://github.com/matrix-org/synapse/issues/14866)) +- Fix a bug when using the `send_local_online_presence_to` module API. ([\#14880](https://github.com/matrix-org/synapse/issues/14880)) +- Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14915](https://github.com/matrix-org/synapse/issues/14915)) +- Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14926](https://github.com/matrix-org/synapse/issues/14926)) +- Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. ([\#14942](https://github.com/matrix-org/synapse/issues/14942)) +- Fix a bug introduced in Synapse v1.64 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). 
([\#14944](https://github.com/matrix-org/synapse/issues/14944)) +- Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. ([\#14947](https://github.com/matrix-org/synapse/issues/14947)) +- Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. ([\#14976](https://github.com/matrix-org/synapse/issues/14976)) + + +Internal Changes +---------------- + +- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14858](https://github.com/matrix-org/synapse/issues/14858)) +- Add missing type hints. ([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956)) +- Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970)) +- Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916)) +- Fix typo in release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920)) +- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922)) +- Bump ijson from 3.1.4 to 3.2.0.post0. ([\#14935](https://github.com/matrix-org/synapse/issues/14935)) +- Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. ([\#14936](https://github.com/matrix-org/synapse/issues/14936)) +- Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. ([\#14937](https://github.com/matrix-org/synapse/issues/14937)) +- Bump types-pillow from 9.4.0.3 to 9.4.0.5. ([\#14938](https://github.com/matrix-org/synapse/issues/14938)) +- Bump hiredis from 2.0.0 to 2.1.1. ([\#14939](https://github.com/matrix-org/synapse/issues/14939)) +- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945)) +- Update build system requirements to allow building with poetry-core==1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949)) +- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950)) +- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952)) +- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953)) +- Faster room joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954)) +- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965)) +- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968)) +- Improve performance of joining and leaving large rooms with many local users. 
([\#14971](https://github.com/matrix-org/synapse/issues/14971)) +- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979)) +- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002)) +- Improve type hints. ([\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007)) +- Bump hiredis from 2.1.1 to 2.2.1. ([\#14993](https://github.com/matrix-org/synapse/issues/14993)) +- Bump types-setuptools from 65.6.0.3 to 67.1.0.0. ([\#14994](https://github.com/matrix-org/synapse/issues/14994)) +- Bump prometheus-client from 0.15.0 to 0.16.0. ([\#14995](https://github.com/matrix-org/synapse/issues/14995)) +- Bump anyhow from 1.0.68 to 1.0.69. ([\#14996](https://github.com/matrix-org/synapse/issues/14996)) +- Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997)) +- Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998)) +- Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999)) + + Synapse 1.76.0 (2023-01-31) =========================== diff --git a/changelog.d/14823.feature b/changelog.d/14823.feature deleted file mode 100644 index 8293e99effbc..000000000000 --- a/changelog.d/14823.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/changelog.d/14858.misc b/changelog.d/14858.misc deleted file mode 100644 index c48f40cd3876..000000000000 --- a/changelog.d/14858.misc +++ /dev/null @@ -1 +0,0 @@ -Run the integration test suites with the asyncio reactor enabled in CI. diff --git a/changelog.d/14866.bugfix b/changelog.d/14866.bugfix deleted file mode 100644 index 540f918cbd10..000000000000 --- a/changelog.d/14866.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. diff --git a/changelog.d/14879.misc b/changelog.d/14879.misc deleted file mode 100644 index d44571b73149..000000000000 --- a/changelog.d/14879.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. diff --git a/changelog.d/14880.bugfix b/changelog.d/14880.bugfix deleted file mode 100644 index e56c567082a5..000000000000 --- a/changelog.d/14880.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug when using the `send_local_online_presence_to` module API. diff --git a/changelog.d/14886.misc b/changelog.d/14886.misc deleted file mode 100644 index 9f5384e60e78..000000000000 --- a/changelog.d/14886.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. 
\ No newline at end of file diff --git a/changelog.d/14887.misc b/changelog.d/14887.misc deleted file mode 100644 index 9f5384e60e78..000000000000 --- a/changelog.d/14887.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. \ No newline at end of file diff --git a/changelog.d/14894.feature b/changelog.d/14894.feature deleted file mode 100644 index d22741d079af..000000000000 --- a/changelog.d/14894.feature +++ /dev/null @@ -1 +0,0 @@ -Adds profile information, devices and connections to the user data export via command line. \ No newline at end of file diff --git a/changelog.d/14904.misc b/changelog.d/14904.misc deleted file mode 100644 index d44571b73149..000000000000 --- a/changelog.d/14904.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. diff --git a/changelog.d/14908.misc b/changelog.d/14908.misc deleted file mode 100644 index 365762360285..000000000000 --- a/changelog.d/14908.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of `/sync` in a few situations. diff --git a/changelog.d/14915.bugfix b/changelog.d/14915.bugfix deleted file mode 100644 index 4969e5450c3f..000000000000 --- a/changelog.d/14915.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. diff --git a/changelog.d/14916.misc b/changelog.d/14916.misc deleted file mode 100644 index 59914d4b8afb..000000000000 --- a/changelog.d/14916.misc +++ /dev/null @@ -1 +0,0 @@ -Document how to handle Dependabot pull requests. diff --git a/changelog.d/14920.misc b/changelog.d/14920.misc deleted file mode 100644 index 7988897d3933..000000000000 --- a/changelog.d/14920.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in release script. diff --git a/changelog.d/14922.misc b/changelog.d/14922.misc deleted file mode 100644 index 2cc3614dfded..000000000000 --- a/changelog.d/14922.misc +++ /dev/null @@ -1 +0,0 @@ -Use `StrCollection` to avoid potential bugs with `Collection[str]`. diff --git a/changelog.d/14926.bugfix b/changelog.d/14926.bugfix deleted file mode 100644 index f1f34cd6badb..000000000000 --- a/changelog.d/14926.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. diff --git a/changelog.d/14927.misc b/changelog.d/14927.misc deleted file mode 100644 index 9f5384e60e78..000000000000 --- a/changelog.d/14927.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. \ No newline at end of file diff --git a/changelog.d/14935.misc b/changelog.d/14935.misc deleted file mode 100644 index 0ad74b90eb73..000000000000 --- a/changelog.d/14935.misc +++ /dev/null @@ -1 +0,0 @@ -Bump ijson from 3.1.4 to 3.2.0.post0. diff --git a/changelog.d/14936.misc b/changelog.d/14936.misc deleted file mode 100644 index bd5001173f7a..000000000000 --- a/changelog.d/14936.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. diff --git a/changelog.d/14937.misc b/changelog.d/14937.misc deleted file mode 100644 index 061568f010ac..000000000000 --- a/changelog.d/14937.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. diff --git a/changelog.d/14938.misc b/changelog.d/14938.misc deleted file mode 100644 index f2fbabe8bb07..000000000000 --- a/changelog.d/14938.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.4.0.3 to 9.4.0.5. 
diff --git a/changelog.d/14939.misc b/changelog.d/14939.misc deleted file mode 100644 index 236826e44174..000000000000 --- a/changelog.d/14939.misc +++ /dev/null @@ -1 +0,0 @@ -Bump hiredis from 2.0.0 to 2.1.1. diff --git a/changelog.d/14942.bugfix b/changelog.d/14942.bugfix deleted file mode 100644 index a3ca3eb7e9d9..000000000000 --- a/changelog.d/14942.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. diff --git a/changelog.d/14943.feature b/changelog.d/14943.feature deleted file mode 100644 index 8293e99effbc..000000000000 --- a/changelog.d/14943.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/changelog.d/14944.bugfix b/changelog.d/14944.bugfix deleted file mode 100644 index 5fe1fb322b40..000000000000 --- a/changelog.d/14944.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.64 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). diff --git a/changelog.d/14945.misc b/changelog.d/14945.misc deleted file mode 100644 index 654174f9a81f..000000000000 --- a/changelog.d/14945.misc +++ /dev/null @@ -1 +0,0 @@ -Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. diff --git a/changelog.d/14947.bugfix b/changelog.d/14947.bugfix deleted file mode 100644 index b9e768c44c83..000000000000 --- a/changelog.d/14947.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. diff --git a/changelog.d/14949.misc b/changelog.d/14949.misc deleted file mode 100644 index 793a78a5dd5c..000000000000 --- a/changelog.d/14949.misc +++ /dev/null @@ -1 +0,0 @@ -Update build system requirements to allow building with poetry-core==1.5.0. diff --git a/changelog.d/14950.misc b/changelog.d/14950.misc deleted file mode 100644 index 6602776b3ffd..000000000000 --- a/changelog.d/14950.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. diff --git a/changelog.d/14952.misc b/changelog.d/14952.misc deleted file mode 100644 index 4add8446d250..000000000000 --- a/changelog.d/14952.misc +++ /dev/null @@ -1 +0,0 @@ -Bump docker/build-push-action from 3 to 4. diff --git a/changelog.d/14953.misc b/changelog.d/14953.misc deleted file mode 100644 index 1741e1627caa..000000000000 --- a/changelog.d/14953.misc +++ /dev/null @@ -1 +0,0 @@ -Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. diff --git a/changelog.d/14954.misc b/changelog.d/14954.misc deleted file mode 100644 index b86b6bf01e24..000000000000 --- a/changelog.d/14954.misc +++ /dev/null @@ -1 +0,0 @@ -Faster room joins: Refactor internal handling of servers in room to never store an empty list. diff --git a/changelog.d/14956.misc b/changelog.d/14956.misc deleted file mode 100644 index 9f5384e60e78..000000000000 --- a/changelog.d/14956.misc +++ /dev/null @@ -1 +0,0 @@ -Add missing type hints. 
\ No newline at end of file diff --git a/changelog.d/14957.feature b/changelog.d/14957.feature deleted file mode 100644 index 8293e99effbc..000000000000 --- a/changelog.d/14957.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/changelog.d/14958.feature b/changelog.d/14958.feature deleted file mode 100644 index 8293e99effbc..000000000000 --- a/changelog.d/14958.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. diff --git a/changelog.d/14960.feature b/changelog.d/14960.feature deleted file mode 100644 index b9bb331273a7..000000000000 --- a/changelog.d/14960.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). diff --git a/changelog.d/14962.feature b/changelog.d/14962.feature deleted file mode 100644 index 38f26012f23c..000000000000 --- a/changelog.d/14962.feature +++ /dev/null @@ -1 +0,0 @@ -Improve performance when joining or sending an event large rooms. diff --git a/changelog.d/14965.misc b/changelog.d/14965.misc deleted file mode 100644 index 3ae0537d9665..000000000000 --- a/changelog.d/14965.misc +++ /dev/null @@ -1 +0,0 @@ -Allow running `cargo` without the `extension-module` option. diff --git a/changelog.d/14968.misc b/changelog.d/14968.misc deleted file mode 100644 index a96c977f3b19..000000000000 --- a/changelog.d/14968.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. diff --git a/changelog.d/14970.misc b/changelog.d/14970.misc deleted file mode 100644 index 365762360285..000000000000 --- a/changelog.d/14970.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of `/sync` in a few situations. diff --git a/changelog.d/14971.misc b/changelog.d/14971.misc deleted file mode 100644 index 130045a1237f..000000000000 --- a/changelog.d/14971.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of joining and leaving large rooms with many local users. diff --git a/changelog.d/14976.bugfix b/changelog.d/14976.bugfix deleted file mode 100644 index 0cde046c0e00..000000000000 --- a/changelog.d/14976.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. diff --git a/changelog.d/14979.misc b/changelog.d/14979.misc deleted file mode 100644 index c09911e48d2f..000000000000 --- a/changelog.d/14979.misc +++ /dev/null @@ -1 +0,0 @@ -Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/14981.misc b/changelog.d/14981.misc deleted file mode 100644 index 68ac8335fc4c..000000000000 --- a/changelog.d/14981.misc +++ /dev/null @@ -1 +0,0 @@ -Add tests for `_flatten_dict`. diff --git a/changelog.d/14983.misc b/changelog.d/14983.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14983.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/14984.misc b/changelog.d/14984.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14984.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. 
diff --git a/changelog.d/14985.misc b/changelog.d/14985.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14985.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/14987.misc b/changelog.d/14987.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14987.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/14988.misc b/changelog.d/14988.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14988.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/14990.misc b/changelog.d/14990.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14990.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/14991.misc b/changelog.d/14991.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14991.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/14992.misc b/changelog.d/14992.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/14992.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/14993.misc b/changelog.d/14993.misc deleted file mode 100644 index 77f3528e2b9d..000000000000 --- a/changelog.d/14993.misc +++ /dev/null @@ -1 +0,0 @@ -Bump hiredis from 2.1.1 to 2.2.1. diff --git a/changelog.d/14994.misc b/changelog.d/14994.misc deleted file mode 100644 index 7ca29f1ca920..000000000000 --- a/changelog.d/14994.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 65.6.0.3 to 67.1.0.0. diff --git a/changelog.d/14995.misc b/changelog.d/14995.misc deleted file mode 100644 index 44af2821a641..000000000000 --- a/changelog.d/14995.misc +++ /dev/null @@ -1 +0,0 @@ -Bump prometheus-client from 0.15.0 to 0.16.0. diff --git a/changelog.d/14996.misc b/changelog.d/14996.misc deleted file mode 100644 index 7ccef81e8288..000000000000 --- a/changelog.d/14996.misc +++ /dev/null @@ -1 +0,0 @@ -Bump anyhow from 1.0.68 to 1.0.69. diff --git a/changelog.d/14997.misc b/changelog.d/14997.misc deleted file mode 100644 index 2ae4582dabd4..000000000000 --- a/changelog.d/14997.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde_json from 1.0.91 to 1.0.92. diff --git a/changelog.d/14998.misc b/changelog.d/14998.misc deleted file mode 100644 index f191ae22ac51..000000000000 --- a/changelog.d/14998.misc +++ /dev/null @@ -1 +0,0 @@ -Bump isort from 5.11.4 to 5.11.5. diff --git a/changelog.d/14999.misc b/changelog.d/14999.misc deleted file mode 100644 index f03f9239e825..000000000000 --- a/changelog.d/14999.misc +++ /dev/null @@ -1 +0,0 @@ -Bump phonenumbers from 8.13.4 to 8.13.5. diff --git a/changelog.d/15002.misc b/changelog.d/15002.misc deleted file mode 100644 index 68ac8335fc4c..000000000000 --- a/changelog.d/15002.misc +++ /dev/null @@ -1 +0,0 @@ -Add tests for `_flatten_dict`. diff --git a/changelog.d/15007.misc b/changelog.d/15007.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15007.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/debian/changelog b/debian/changelog index c678ae6c5c7e..becc41c1217a 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.77.0~rc1) stable; urgency=medium + + * New Synapse release 1.77.0rc1. + + -- Synapse Packaging team Tue, 07 Feb 2023 13:45:14 +0000 + matrix-synapse-py3 (1.76.0) stable; urgency=medium * New Synapse release 1.76.0. 
diff --git a/pyproject.toml b/pyproject.toml index 12734dec710e..1e59706104ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.76.0" +version = "1.77.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 2dff93099b5aa7e213da76a9c4b3de84385b58e1 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Feb 2023 15:24:44 +0000 Subject: [PATCH 138/344] Typecheck tests.rest.media.v1.test_media_storage (#15008) * Fix MediaStorage type hint * Typecheck tests.rest.media.v1.test_media_storage * Changelog * Remove assert and make the comment succinct * Fix syntax for olddeps --- changelog.d/15008.misc | 1 + mypy.ini | 1 - synapse/rest/media/v1/media_storage.py | 7 ++-- tests/rest/media/v1/test_media_storage.py | 49 ++++++++++++++--------- 4 files changed, 35 insertions(+), 23 deletions(-) create mode 100644 changelog.d/15008.misc diff --git a/changelog.d/15008.misc b/changelog.d/15008.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15008.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 0efafb26b6a6..4598002c4af3 100644 --- a/mypy.ini +++ b/mypy.ini @@ -33,7 +33,6 @@ exclude = (?x) |synapse/storage/schema/ |tests/module_api/test_api.py - |tests/rest/media/v1/test_media_storage.py |tests/server.py )$ diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index a5c3de192f6f..db25848744de 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -46,10 +46,9 @@ from .filepath import MediaFilePaths if TYPE_CHECKING: + from synapse.rest.media.v1.storage_provider import StorageProvider from synapse.server import HomeServer - from .storage_provider import StorageProviderWrapper - logger = logging.getLogger(__name__) @@ -68,7 +67,7 @@ def __init__( hs: "HomeServer", local_media_directory: str, filepaths: MediaFilePaths, - storage_providers: Sequence["StorageProviderWrapper"], + storage_providers: Sequence["StorageProvider"], ): self.hs = hs self.reactor = hs.get_reactor() @@ -360,7 +359,7 @@ class ReadableFileWrapper: clock: Clock path: str - async def write_chunks_to(self, callback: Callable[[bytes], None]) -> None: + async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None: """Reads the file in chunks and calls the callback with each chunk.""" with open(self.path, "rb") as file: diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index d18fc13c21d6..17a3b06a8e25 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -16,7 +16,7 @@ import tempfile from binascii import unhexlify from io import BytesIO -from typing import Any, BinaryIO, Dict, List, Optional, Union +from typing import Any, BinaryIO, ClassVar, Dict, List, Optional, Tuple, Union from unittest.mock import Mock from urllib import parse @@ -32,6 +32,7 @@ from synapse.api.errors import Codes from synapse.events import EventBase from synapse.events.spamcheck import load_legacy_spam_checkers +from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable from synapse.module_api import ModuleApi from synapse.rest import admin @@ -41,7 +42,7 @@ from synapse.rest.media.v1.media_storage import MediaStorage, ReadableFileWrapper from 
synapse.rest.media.v1.storage_provider import FileStorageProviderBackend from synapse.server import HomeServer -from synapse.types import RoomAlias +from synapse.types import JsonDict, RoomAlias from synapse.util import Clock from tests import unittest @@ -201,36 +202,46 @@ class _TestImage: ], ) class MediaRepoTests(unittest.HomeserverTestCase): - + test_image: ClassVar[_TestImage] hijack_auth = True user_id = "@test:user" def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.fetches = [] + self.fetches: List[ + Tuple[ + "Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]]", + str, + str, + Optional[QueryParams], + ] + ] = [] def get_file( destination: str, path: str, output_stream: BinaryIO, - args: Optional[Dict[str, Union[str, List[str]]]] = None, + args: Optional[QueryParams] = None, + retry_on_dns_fail: bool = True, max_size: Optional[int] = None, - ) -> Deferred: - """ - Returns tuple[int,dict,str,int] of file length, response headers, - absolute URI, and response code. - """ + ignore_backoff: bool = False, + ) -> "Deferred[Tuple[int, Dict[bytes, List[bytes]]]]": + """A mock for MatrixFederationHttpClient.get_file.""" - def write_to(r): + def write_to( + r: Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]] + ) -> Tuple[int, Dict[bytes, List[bytes]]]: data, response = r output_stream.write(data) return response - d = Deferred() - d.addCallback(write_to) + d: Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]] = Deferred() self.fetches.append((d, destination, path, args)) - return make_deferred_yieldable(d) + # Note that this callback changes the value held by d. + d_after_callback = d.addCallback(write_to) + return make_deferred_yieldable(d_after_callback) + # Mock out the homeserver's MatrixFederationHttpClient client = Mock() client.get_file = get_file @@ -461,6 +472,7 @@ def test_thumbnail_repeated_thumbnail(self) -> None: # Synapse should regenerate missing thumbnails. 
origin, media_id = self.media_id.split("/") info = self.get_success(self.store.get_cached_remote_media(origin, media_id)) + assert info is not None file_id = info["filesystem_id"] thumbnail_dir = self.media_repo.filepaths.remote_media_thumbnail_dir( @@ -581,7 +593,7 @@ def test_same_quality(self, method: str, desired_size: int) -> None: "thumbnail_method": method, "thumbnail_type": self.test_image.content_type, "thumbnail_length": 256, - "filesystem_id": f"thumbnail1{self.test_image.extension}", + "filesystem_id": f"thumbnail1{self.test_image.extension.decode()}", }, { "thumbnail_width": 32, @@ -589,10 +601,10 @@ def test_same_quality(self, method: str, desired_size: int) -> None: "thumbnail_method": method, "thumbnail_type": self.test_image.content_type, "thumbnail_length": 256, - "filesystem_id": f"thumbnail2{self.test_image.extension}", + "filesystem_id": f"thumbnail2{self.test_image.extension.decode()}", }, ], - file_id=f"image{self.test_image.extension}", + file_id=f"image{self.test_image.extension.decode()}", url_cache=None, server_name=None, ) @@ -637,6 +649,7 @@ def __init__(self, config: Dict[str, Any], api: ModuleApi) -> None: self.config = config self.api = api + @staticmethod def parse_config(config: Dict[str, Any]) -> Dict[str, Any]: return config @@ -748,7 +761,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: async def check_media_file_for_spam( self, file_wrapper: ReadableFileWrapper, file_info: FileInfo - ) -> Union[Codes, Literal["NOT_SPAM"]]: + ) -> Union[Codes, Literal["NOT_SPAM"], Tuple[Codes, JsonDict]]: buf = BytesIO() await file_wrapper.write_chunks_to(buf.write) From 9cd7610f86ab5051c9365dd38d1eec405a5f8ca6 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Feb 2023 15:26:55 +0000 Subject: [PATCH 139/344] Revert "Add `event_stream_ordering` column to membership state tables (#14979)" This reverts commit 5fdc12f482c68e2cdbb78d7db5de2cfe621720d4. --- synapse/storage/databases/main/events.py | 23 +--- .../databases/main/events_bg_updates.py | 104 +----------------- .../storage/databases/main/events_worker.py | 8 +- ...embership_tables_event_stream_ordering.sql | 21 ---- 4 files changed, 11 insertions(+), 145 deletions(-) delete mode 100644 synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index b6cce0a7ccf1..1536937b67e8 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1147,15 +1147,11 @@ def _update_current_state_txn( # been inserted into room_memberships. txn.execute_batch( """INSERT INTO current_state_events - (room_id, type, state_key, event_id, membership, event_stream_ordering) - VALUES ( - ?, ?, ?, ?, - (SELECT membership FROM room_memberships WHERE event_id = ?), - (SELECT stream_ordering FROM events WHERE event_id = ?) - ) + (room_id, type, state_key, event_id, membership) + VALUES (?, ?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) """, [ - (room_id, key[0], key[1], ev_id, ev_id, ev_id) + (room_id, key[0], key[1], ev_id, ev_id) for key, ev_id in to_insert.items() ], ) @@ -1182,15 +1178,11 @@ def _update_current_state_txn( if to_insert: txn.execute_batch( """INSERT INTO local_current_membership - (room_id, user_id, event_id, membership, event_stream_ordering) - VALUES ( - ?, ?, ?, - (SELECT membership FROM room_memberships WHERE event_id = ?), - (SELECT stream_ordering FROM events WHERE event_id = ?) 
- ) + (room_id, user_id, event_id, membership) + VALUES (?, ?, ?, (SELECT membership FROM room_memberships WHERE event_id = ?)) """, [ - (room_id, key[1], ev_id, ev_id, ev_id) + (room_id, key[1], ev_id, ev_id) for key, ev_id in to_insert.items() if key[0] == EventTypes.Member and self.is_mine_id(key[1]) ], @@ -1798,7 +1790,6 @@ def _store_room_members_txn( table="room_memberships", keys=( "event_id", - "event_stream_ordering", "user_id", "sender", "room_id", @@ -1809,7 +1800,6 @@ def _store_room_members_txn( values=[ ( event.event_id, - event.internal_metadata.stream_ordering, event.state_key, event.user_id, event.room_id, @@ -1842,7 +1832,6 @@ def _store_room_members_txn( keyvalues={"room_id": event.room_id, "user_id": event.state_key}, values={ "event_id": event.event_id, - "event_stream_ordering": event.internal_metadata.stream_ordering, "membership": event.membership, }, ) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 0e81d38ccaee..b9d3c36d602c 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -17,7 +17,7 @@ import attr -from synapse.api.constants import EventContentFields, EventTypes, RelationTypes +from synapse.api.constants import EventContentFields, RelationTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import make_event_from_dict from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause @@ -71,10 +71,6 @@ class _BackgroundUpdates: EVENTS_JUMP_TO_DATE_INDEX = "events_jump_to_date_index" - POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING = ( - "populate_membership_event_stream_ordering" - ) - @attr.s(slots=True, frozen=True, auto_attribs=True) class _CalculateChainCover: @@ -103,10 +99,6 @@ def __init__( ): super().__init__(database, db_conn, hs) - self.db_pool.updates.register_background_update_handler( - _BackgroundUpdates.POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING, - self._populate_membership_event_stream_ordering, - ) self.db_pool.updates.register_background_update_handler( _BackgroundUpdates.EVENT_ORIGIN_SERVER_TS_NAME, self._background_reindex_origin_server_ts, @@ -1506,97 +1498,3 @@ def _populate_txn(txn: LoggingTransaction) -> bool: ) return batch_size - - async def _populate_membership_event_stream_ordering( - self, progress: JsonDict, batch_size: int - ) -> int: - def _populate_membership_event_stream_ordering( - txn: LoggingTransaction, - ) -> bool: - - if "max_stream_ordering" in progress: - max_stream_ordering = progress["max_stream_ordering"] - else: - txn.execute("SELECT max(stream_ordering) FROM events") - res = txn.fetchone() - if res is None or res[0] is None: - return True - else: - max_stream_ordering = res[0] - - start = progress.get("stream_ordering", 0) - stop = start + batch_size - - sql = f""" - SELECT room_id, event_id, stream_ordering - FROM events - WHERE - type = '{EventTypes.Member}' - AND stream_ordering >= ? - AND stream_ordering < ? 
- """ - txn.execute(sql, (start, stop)) - - rows: List[Tuple[str, str, int]] = cast( - List[Tuple[str, str, int]], txn.fetchall() - ) - - event_ids: List[Tuple[str]] = [] - event_stream_orderings: List[Tuple[int]] = [] - - for _, event_id, event_stream_ordering in rows: - event_ids.append((event_id,)) - event_stream_orderings.append((event_stream_ordering,)) - - self.db_pool.simple_update_many_txn( - txn, - table="current_state_events", - key_names=("event_id",), - key_values=event_ids, - value_names=("event_stream_ordering",), - value_values=event_stream_orderings, - ) - - self.db_pool.simple_update_many_txn( - txn, - table="room_memberships", - key_names=("event_id",), - key_values=event_ids, - value_names=("event_stream_ordering",), - value_values=event_stream_orderings, - ) - - # NOTE: local_current_membership has no index on event_id, so only - # the room ID here will reduce the query rows read. - for room_id, event_id, event_stream_ordering in rows: - txn.execute( - """ - UPDATE local_current_membership - SET event_stream_ordering = ? - WHERE room_id = ? AND event_id = ? - """, - (event_stream_ordering, room_id, event_id), - ) - - self.db_pool.updates._background_update_progress_txn( - txn, - _BackgroundUpdates.POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING, - { - "stream_ordering": stop, - "max_stream_ordering": max_stream_ordering, - }, - ) - - return stop > max_stream_ordering - - finished = await self.db_pool.runInteraction( - "_populate_membership_event_stream_ordering", - _populate_membership_event_stream_ordering, - ) - - if finished: - await self.db_pool.updates._end_background_update( - _BackgroundUpdates.POPULATE_MEMBERSHIP_EVENT_STREAM_ORDERING - ) - - return batch_size diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 6d0ef10258de..d7d08369cacd 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1779,7 +1779,7 @@ def get_ex_outlier_stream_rows_txn( txn: LoggingTransaction, ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( - "SELECT out.event_stream_ordering, e.event_id, e.room_id, e.type," + "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," " e.outlier" " FROM events AS e" @@ -1791,10 +1791,10 @@ def get_ex_outlier_stream_rows_txn( " LEFT JOIN event_relations USING (event_id)" " LEFT JOIN room_memberships USING (event_id)" " LEFT JOIN rejections USING (event_id)" - " WHERE ? < out.event_stream_ordering" - " AND out.event_stream_ordering <= ?" + " WHERE ? < event_stream_ordering" + " AND event_stream_ordering <= ?" " AND out.instance_name = ?" - " ORDER BY out.event_stream_ordering ASC" + " ORDER BY event_stream_ordering ASC" ) txn.execute(sql, (last_id, current_id, instance_name)) diff --git a/synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql b/synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql deleted file mode 100644 index 7c30a67fc4d1..000000000000 --- a/synapse/storage/schema/main/delta/73/26membership_tables_event_stream_ordering.sql +++ /dev/null @@ -1,21 +0,0 @@ -/* Copyright 2022 Beeper - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -ALTER TABLE current_state_events ADD COLUMN event_stream_ordering BIGINT; -ALTER TABLE local_current_membership ADD COLUMN event_stream_ordering BIGINT; -ALTER TABLE room_memberships ADD COLUMN event_stream_ordering BIGINT; - -INSERT INTO background_updates (update_name, progress_json) VALUES - ('populate_membership_event_stream_ordering', '{}'); From f10caa73eee0caa91cf373966104d1ededae2aee Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Feb 2023 15:33:33 +0000 Subject: [PATCH 140/344] Disambiguate `get_ex_outlier_stream_rows` query A backwards-compatible piece of #14979 that's safe to land now. --- synapse/storage/databases/main/events_worker.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index d7d08369cacd..6d0ef10258de 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1779,7 +1779,7 @@ def get_ex_outlier_stream_rows_txn( txn: LoggingTransaction, ) -> List[Tuple[int, str, str, str, str, str, str, str, bool, bool]]: sql = ( - "SELECT event_stream_ordering, e.event_id, e.room_id, e.type," + "SELECT out.event_stream_ordering, e.event_id, e.room_id, e.type," " se.state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL," " e.outlier" " FROM events AS e" @@ -1791,10 +1791,10 @@ def get_ex_outlier_stream_rows_txn( " LEFT JOIN event_relations USING (event_id)" " LEFT JOIN room_memberships USING (event_id)" " LEFT JOIN rejections USING (event_id)" - " WHERE ? < event_stream_ordering" - " AND event_stream_ordering <= ?" + " WHERE ? < out.event_stream_ordering" + " AND out.event_stream_ordering <= ?" " AND out.instance_name = ?" - " ORDER BY event_stream_ordering ASC" + " ORDER BY out.event_stream_ordering ASC" ) txn.execute(sql, (last_id, current_id, instance_name)) From 4142dca7188656647a5bfcbb593a58af721ebbf2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Feb 2023 18:11:16 -0500 Subject: [PATCH 141/344] Include no actions instead of dont_notify for suppressing edits. (#15016) --- changelog.d/15016.feature | 1 + rust/src/push/base_rules.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15016.feature diff --git a/changelog.d/15016.feature b/changelog.d/15016.feature new file mode 100644 index 000000000000..b9bb331273a7 --- /dev/null +++ b/changelog.d/15016.feature @@ -0,0 +1 @@ +Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). 
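For context on the `base_rules.rs` hunk below: once serialised out to clients, the MSC3958 edit-suppression rule now carries an empty `actions` list instead of the deprecated `dont_notify` action. A rough sketch of the shape involved follows — the rule ID and condition are illustrative placeholders, not copied from the Rust source:

```python
# Illustrative only: approximate client-facing JSON shape of an override push
# rule whose actions list is empty ("match this event, but do nothing"),
# which is what the Rust change below switches to from ["dont_notify"].
suppress_edits_rule = {
    "rule_id": ".example.suppress_edits",  # hypothetical ID, for illustration
    "default": True,
    "enabled": True,
    "conditions": [
        # Hypothetical condition as a stand-in: fires on m.replace (edit) relations.
        {
            "kind": "event_match",
            "key": "content.m.relates_to.rel_type",
            "pattern": "m.replace",
        },
    ],
    # An empty list means the rule matches but takes no action, suppressing
    # the notification without needing the legacy "dont_notify" action.
    "actions": [],
}
```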
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index e9af26dd4f8f..97d0a0a7e220 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -76,7 +76,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pattern_type: None, }, ))]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: true, }, From 0c29f5fbb42cd92acd122d20a962b93b6ae020c2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Feb 2023 23:55:22 +0000 Subject: [PATCH 142/344] Hacky fix to make mac wheels (#15019) * Skip testing PyPy wheels One of the test builds on #15015 failed to install a pp38-* wheel because it didn't have access to the openssl headers to build `cryptography` from source. We don't run CI against PyPy so I'm going to be a meanie and skip testing the wheels. (And I've no idea why 3.8 was special in the first place, either.) * Hack the name of the wheel so cibw can test it I hate hate hate hate hate hate hate hate hate this * Changelog * Apply suggestions from code review Co-authored-by: Patrick Cloke --------- Co-authored-by: Patrick Cloke --- .ci/scripts/auditwheel_wrapper.py | 11 ++++++++++- .github/workflows/release-artifacts.yml | 2 +- changelog.d/15019.misc | 1 + 3 files changed, 12 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15019.misc diff --git a/.ci/scripts/auditwheel_wrapper.py b/.ci/scripts/auditwheel_wrapper.py index a33b39314fb8..18cd0a7b528d 100755 --- a/.ci/scripts/auditwheel_wrapper.py +++ b/.ci/scripts/auditwheel_wrapper.py @@ -50,7 +50,16 @@ def cpython(wheel_file: str, name: str, version: Version, tag: Tag) -> str: check_is_abi3_compatible(wheel_file) - abi3_tag = Tag(tag.interpreter, "abi3", tag.platform) + # HACK: it seems that some older versions of pip will consider a wheel marked + # as macosx_11_0 as incompatible with Big Sur. I haven't done the full archaeology + # here; there are some clues in + # https://github.com/pantsbuild/pants/pull/12857 + # https://github.com/pypa/pip/issues/9138 + # https://github.com/pypa/packaging/pull/319 + # Empirically this seems to work, note that macOS 11 and 10.16 are the same, + # both versions are valid for backwards compatibility. + platform = tag.platform.replace("macosx_11_0", "macosx_10_16") + abi3_tag = Tag(tag.interpreter, "abi3", platform) dirname = os.path.dirname(wheel_file) new_wheel_file = os.path.join( diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index f540d2d28b09..bf57bcab6104 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -148,7 +148,7 @@ jobs: env: # Skip testing for platforms which various libraries don't have wheels # for, and so need extra build deps. - CIBW_TEST_SKIP: pp3{7,9}-* *i686* *musl* + CIBW_TEST_SKIP: pp3*-* *i686* *musl* # Fix Rust OOM errors on emulated aarch64: https://github.com/rust-lang/cargo/issues/10583 CARGO_NET_GIT_FETCH_WITH_CLI: true CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI diff --git a/changelog.d/15019.misc b/changelog.d/15019.misc new file mode 100644 index 000000000000..bfc1d0f6ce72 --- /dev/null +++ b/changelog.d/15019.misc @@ -0,0 +1 @@ +Fix creation of wheels on macOS. 
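As a quick illustration of the platform-tag rewrite that `auditwheel_wrapper.py` performs above — a sketch only, assuming the `packaging` library's `Tag` class that the wrapper already imports:

```python
from packaging.tags import Tag

# Mirror of the rename done in cpython() above: macOS 11.0 and 10.16 identify
# the same release, so rewriting the platform keeps older pip versions happy.
tag = Tag("cp37", "abi3", "macosx_11_0_x86_64")
platform = tag.platform.replace("macosx_11_0", "macosx_10_16")
abi3_tag = Tag(tag.interpreter, "abi3", platform)

print(str(abi3_tag))  # cp37-abi3-macosx_10_16_x86_64
```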
From 236f6dfc8c5702b12c80ee11c66d8345a462279c Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 8 Feb 2023 00:12:22 +0000 Subject: [PATCH 143/344] Manually add new news fragments --- CHANGES.md | 2 ++ changelog.d/15016.feature | 1 - changelog.d/15019.misc | 1 - 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15016.feature delete mode 100644 changelog.d/15019.misc diff --git a/CHANGES.md b/CHANGES.md index 1951ea931dca..80f41615a0d4 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -8,6 +8,7 @@ Features - Adds profile information, devices and connections to the user data export via command line. ([\#14894](https://github.com/matrix-org/synapse/issues/14894)) - Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960)) - Improve performance when joining or sending an event large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962)) +- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#15016](https://github.com/matrix-org/synapse/issues/15016)) Bugfixes @@ -56,6 +57,7 @@ Internal Changes - Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997)) - Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998)) - Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999)) +- Fix creation of wheels on macOS. ([\#15019](https://github.com/matrix-org/synapse/issues/15019)) Synapse 1.76.0 (2023-01-31) diff --git a/changelog.d/15016.feature b/changelog.d/15016.feature deleted file mode 100644 index b9bb331273a7..000000000000 --- a/changelog.d/15016.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). diff --git a/changelog.d/15019.misc b/changelog.d/15019.misc deleted file mode 100644 index bfc1d0f6ce72..000000000000 --- a/changelog.d/15019.misc +++ /dev/null @@ -1 +0,0 @@ -Fix creation of wheels on macOS. From 17e0c75eec542c1b65ca9295a0db2e150cf34e6d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 8 Feb 2023 00:31:54 +0000 Subject: [PATCH 144/344] Rearrange items --- CHANGES.md | 47 ++++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 80f41615a0d4..f93b187f012e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,51 +5,53 @@ Features -------- - Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958)) -- Adds profile information, devices and connections to the user data export via command line. ([\#14894](https://github.com/matrix-org/synapse/issues/14894)) -- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960)) -- Improve performance when joining or sending an event large rooms. 
([\#14962](https://github.com/matrix-org/synapse/issues/14962)) -- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#15016](https://github.com/matrix-org/synapse/issues/15016)) +- Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960), [\#15016](https://github.com/matrix-org/synapse/issues/15016)) +- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894)) +- Improve performance when joining or sending an event in large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962)) +- Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971)) Bugfixes -------- - Fix a bug introduced in Synapse 1.53.0 where `next_batch` tokens from `/sync` could not be used with the `/relations` endpoint. ([\#14866](https://github.com/matrix-org/synapse/issues/14866)) -- Fix a bug when using the `send_local_online_presence_to` module API. ([\#14880](https://github.com/matrix-org/synapse/issues/14880)) +- Fix a bug introduced in Synapse 1.35.0 where the module API's `send_local_online_presence_to` would fail to send presence updates over federation. ([\#14880](https://github.com/matrix-org/synapse/issues/14880)) - Fix a bug introduced in Synapse 1.70.0 where the background updates to add non-thread unique indexes on receipts could fail when upgrading from 1.67.0 or earlier. ([\#14915](https://github.com/matrix-org/synapse/issues/14915)) - Fix a regression introduced in Synapse 1.69.0 which can result in database corruption when database migrations are interrupted on sqlite. ([\#14926](https://github.com/matrix-org/synapse/issues/14926)) - Fix a bug introduced in Synapse 1.68.0 where we were unable to service remote joins in rooms with `@room` notification levels set to `null` in their (malformed) power levels. ([\#14942](https://github.com/matrix-org/synapse/issues/14942)) -- Fix a bug introduced in Synapse v1.64 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). ([\#14944](https://github.com/matrix-org/synapse/issues/14944)) +- Fix a bug introduced in Synapse 1.64.0 where boolean power levels were erroneously permitted in [v10 rooms](https://spec.matrix.org/v1.5/rooms/v10/). ([\#14944](https://github.com/matrix-org/synapse/issues/14944)) - Fix a long-standing bug where sending messages on servers with presence enabled would spam "Re-starting finished log context" log lines. ([\#14947](https://github.com/matrix-org/synapse/issues/14947)) - Fix a bug introduced in Synapse 1.68.0 where logging from the Rust module was not properly logged. ([\#14976](https://github.com/matrix-org/synapse/issues/14976)) +- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945)) Internal Changes ---------------- -- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14858](https://github.com/matrix-org/synapse/issues/14858)) -- Add missing type hints. 
([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956)) +- Add missing type hints. ([\#14879](https://github.com/matrix-org/synapse/issues/14879), [\#14886](https://github.com/matrix-org/synapse/issues/14886), [\#14887](https://github.com/matrix-org/synapse/issues/14887), [\#14904](https://github.com/matrix-org/synapse/issues/14904), [\#14927](https://github.com/matrix-org/synapse/issues/14927), [\#14956](https://github.com/matrix-org/synapse/issues/14956), [\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007)) +- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922)) +- Allow running the complement tests suites with the asyncio reactor enabled. ([\#14858](https://github.com/matrix-org/synapse/issues/14858)) - Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970)) - Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916)) - Fix typo in release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920)) -- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14922](https://github.com/matrix-org/synapse/issues/14922)) +- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949)) +- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953)) +- Faster joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954)) +- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950)) +- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965)) +- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979)) +- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002)) +- Fix creation of wheels on macOS. ([\#15019](https://github.com/matrix-org/synapse/issues/15019)) + +
Dependabot updates + +- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968)) +- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952)) - Bump ijson from 3.1.4 to 3.2.0.post0. ([\#14935](https://github.com/matrix-org/synapse/issues/14935)) - Bump types-pyyaml from 6.0.12.2 to 6.0.12.3. ([\#14936](https://github.com/matrix-org/synapse/issues/14936)) - Bump types-jsonschema from 4.17.0.2 to 4.17.0.3. ([\#14937](https://github.com/matrix-org/synapse/issues/14937)) - Bump types-pillow from 9.4.0.3 to 9.4.0.5. ([\#14938](https://github.com/matrix-org/synapse/issues/14938)) - Bump hiredis from 2.0.0 to 2.1.1. ([\#14939](https://github.com/matrix-org/synapse/issues/14939)) -- Fix various long-standing bugs in Synapse's config, event and request handling where booleans were unintentionally accepted where an integer was expected. ([\#14945](https://github.com/matrix-org/synapse/issues/14945)) -- Update build system requirements to allow building with poetry-core==1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949)) -- Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950)) -- Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952)) -- Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953)) -- Faster room joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954)) -- Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965)) -- Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968)) -- Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971)) -- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979)) -- Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002)) -- Improve type hints. ([\#14983](https://github.com/matrix-org/synapse/issues/14983), [\#14984](https://github.com/matrix-org/synapse/issues/14984), [\#14985](https://github.com/matrix-org/synapse/issues/14985), [\#14987](https://github.com/matrix-org/synapse/issues/14987), [\#14988](https://github.com/matrix-org/synapse/issues/14988), [\#14990](https://github.com/matrix-org/synapse/issues/14990), [\#14991](https://github.com/matrix-org/synapse/issues/14991), [\#14992](https://github.com/matrix-org/synapse/issues/14992), [\#15007](https://github.com/matrix-org/synapse/issues/15007)) - Bump hiredis from 2.1.1 to 2.2.1. ([\#14993](https://github.com/matrix-org/synapse/issues/14993)) - Bump types-setuptools from 65.6.0.3 to 67.1.0.0. ([\#14994](https://github.com/matrix-org/synapse/issues/14994)) - Bump prometheus-client from 0.15.0 to 0.16.0. 
([\#14995](https://github.com/matrix-org/synapse/issues/14995)) @@ -57,8 +59,7 @@ Internal Changes - Bump serde_json from 1.0.91 to 1.0.92. ([\#14997](https://github.com/matrix-org/synapse/issues/14997)) - Bump isort from 5.11.4 to 5.11.5. ([\#14998](https://github.com/matrix-org/synapse/issues/14998)) - Bump phonenumbers from 8.13.4 to 8.13.5. ([\#14999](https://github.com/matrix-org/synapse/issues/14999)) -- Fix creation of wheels on macOS. ([\#15019](https://github.com/matrix-org/synapse/issues/15019)) - +
Synapse 1.76.0 (2023-01-31) =========================== From b36c9159139bf6c716954588156fd20eea974987 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 8 Feb 2023 00:32:38 +0000 Subject: [PATCH 145/344] Merge the osx wheel fix with poetry-core 1.5.0 --- CHANGES.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index f93b187f012e..9768940694c9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -34,14 +34,13 @@ Internal Changes - Improve performance of `/sync` in a few situations. ([\#14908](https://github.com/matrix-org/synapse/issues/14908), [\#14970](https://github.com/matrix-org/synapse/issues/14970)) - Document how to handle Dependabot pull requests. ([\#14916](https://github.com/matrix-org/synapse/issues/14916)) - Fix typo in release script. ([\#14920](https://github.com/matrix-org/synapse/issues/14920)) -- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949)) +- Update build system requirements to allow building with poetry-core 1.5.0. ([\#14949](https://github.com/matrix-org/synapse/issues/14949), [\#15019](https://github.com/matrix-org/synapse/issues/15019)) - Add an [lnav](https://lnav.org) config file for Synapse logs to `/contrib/lnav`. ([\#14953](https://github.com/matrix-org/synapse/issues/14953)) - Faster joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954)) - Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950)) - Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965)) - Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979)) - Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002)) -- Fix creation of wheels on macOS. ([\#15019](https://github.com/matrix-org/synapse/issues/15019))
Dependabot updates From b7672b4a972e9484ac0146277619ef78cb51b156 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 8 Feb 2023 00:37:23 +0000 Subject: [PATCH 146/344] Note the revert (that I didn't PR) --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 9768940694c9..b57586d0111d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -39,7 +39,7 @@ Internal Changes - Faster joins: Refactor internal handling of servers in room to never store an empty list. ([\#14954](https://github.com/matrix-org/synapse/issues/14954)) - Faster joins: tag `v2/send_join/` requests to indicate if they served a partial join response. ([\#14950](https://github.com/matrix-org/synapse/issues/14950)) - Allow running `cargo` without the `extension-module` option. ([\#14965](https://github.com/matrix-org/synapse/issues/14965)) -- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979)) +- Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014)) - Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002))
Dependabot updates From d83178a33a4a84e5a3c0fed6aae9b5e56e65817a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 8 Feb 2023 00:39:19 +0000 Subject: [PATCH 147/344] Permalink to the docs --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index b57586d0111d..d6bcacef5f32 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -6,7 +6,7 @@ Features - Experimental support for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952): intentional mentions. ([\#14823](https://github.com/matrix-org/synapse/issues/14823), [\#14943](https://github.com/matrix-org/synapse/issues/14943), [\#14957](https://github.com/matrix-org/synapse/issues/14957), [\#14958](https://github.com/matrix-org/synapse/issues/14958)) - Experimental support to suppress notifications from message edits ([MSC3958](https://github.com/matrix-org/matrix-spec-proposals/pull/3958)). ([\#14960](https://github.com/matrix-org/synapse/issues/14960), [\#15016](https://github.com/matrix-org/synapse/issues/15016)) -- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/latest/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894)) +- Add profile information, devices and connections to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.77/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14894](https://github.com/matrix-org/synapse/issues/14894)) - Improve performance when joining or sending an event in large rooms. ([\#14962](https://github.com/matrix-org/synapse/issues/14962)) - Improve performance of joining and leaving large rooms with many local users. ([\#14971](https://github.com/matrix-org/synapse/issues/14971)) From 85d93d003ced572aac4b410ac42bb14a5e1bc66c Mon Sep 17 00:00:00 2001 From: William Kray Date: Wed, 8 Feb 2023 02:44:19 -0800 Subject: [PATCH 148/344] Clarify limitations of SRV delegation in documentation (#14959) This PR just clarifies in the SRV DNS delegation document that there are still cases a user may have to serve files from `.well-known` endpoints, and this may not be a valid case for using SRV delegation. This has caused some confusion in a few cases. Signed-off-by: William Kray --- changelog.d/14959.doc | 1 + docs/delegate.md | 9 +++++++++ 2 files changed, 10 insertions(+) create mode 100644 changelog.d/14959.doc diff --git a/changelog.d/14959.doc b/changelog.d/14959.doc new file mode 100644 index 000000000000..45edf1a76520 --- /dev/null +++ b/changelog.d/14959.doc @@ -0,0 +1 @@ +Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. diff --git a/docs/delegate.md b/docs/delegate.md index ee9cbb3b1cfc..aee82fcb9ab3 100644 --- a/docs/delegate.md +++ b/docs/delegate.md @@ -73,6 +73,15 @@ It is also possible to do delegation using a SRV DNS record. However, that is ge not recommended, as it can be difficult to configure the TLS certificates correctly in this case, and it offers little advantage over `.well-known` delegation. +Please keep in mind that server delegation is a function of server-server communication, +and as such using SRV DNS records will not cover use cases involving client-server comms. 
+This means setting global client settings (such as a Jitsi endpoint, or disabling +creating new rooms as encrypted by default, etc) will still require that you serve a file +from the `https:///.well-known/` endpoints defined in the spec! If you are +considering using SRV DNS delegation to avoid serving files from this endpoint, consider +the impact that you will not be able to change those client-based default values globally, +and will be relegated to the featureset of the configuration of each individual client. + However, if you really need it, you can find some documentation on what such a record should look like and how Synapse will use it in [the Matrix specification](https://matrix.org/docs/spec/server_server/latest#resolving-server-names). From 22aff546d441f39dd94f3a4ca2e700a76e540b04 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Feb 2023 11:26:10 +0000 Subject: [PATCH 149/344] Bump cryptography from 38.0.4 to 39.0.1 (#15020) * Bump cryptography from 38.0.4 to 39.0.1 Bumps [cryptography](https://github.com/pyca/cryptography) from 38.0.4 to 39.0.1. - [Release notes](https://github.com/pyca/cryptography/releases) - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/38.0.4...39.0.1) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15020.misc | 1 + poetry.lock | 57 ++++++++++++++++++++---------------------- 2 files changed, 28 insertions(+), 30 deletions(-) create mode 100644 changelog.d/15020.misc diff --git a/changelog.d/15020.misc b/changelog.d/15020.misc new file mode 100644 index 000000000000..c5290283f0b5 --- /dev/null +++ b/changelog.d/15020.misc @@ -0,0 +1 @@ +Bump cryptography from 38.0.4 to 39.0.1. diff --git a/poetry.lock b/poetry.lock index 71095c21ed9d..ba7b3a5d5dec 100644 --- a/poetry.lock +++ b/poetry.lock @@ -339,50 +339,47 @@ files = [ [[package]] name = "cryptography" -version = "38.0.4" +version = "39.0.1" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:2fa36a7b2cc0998a3a4d5af26ccb6273f3df133d61da2ba13b3286261e7efb70"}, - {file = "cryptography-38.0.4-cp36-abi3-macosx_10_10_x86_64.whl", hash = "sha256:1f13ddda26a04c06eb57119caf27a524ccae20533729f4b1e4a69b54e07035eb"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:2ec2a8714dd005949d4019195d72abed84198d877112abb5a27740e217e0ea8d"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50a1494ed0c3f5b4d07650a68cd6ca62efe8b596ce743a5c94403e6f11bf06c1"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a10498349d4c8eab7357a8f9aa3463791292845b79597ad1b98a543686fb1ec8"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:10652dd7282de17990b88679cb82f832752c4e8237f0c714be518044269415db"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bfe6472507986613dc6cc00b3d492b2f7564b02b3b3682d25ca7f40fa3fd321b"}, - {file = "cryptography-38.0.4-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ce127dd0a6a0811c251a6cddd014d292728484e530d80e872ad9806cfb1c5b3c"}, - {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:53049f3379ef05182864d13bb9686657659407148f901f3f1eee57a733fb4b00"}, - {file = "cryptography-38.0.4-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:8a4b2bdb68a447fadebfd7d24855758fe2d6fecc7fed0b78d190b1af39a8e3b0"}, - {file = "cryptography-38.0.4-cp36-abi3-win32.whl", hash = "sha256:1d7e632804a248103b60b16fb145e8df0bc60eed790ece0d12efe8cd3f3e7744"}, - {file = "cryptography-38.0.4-cp36-abi3-win_amd64.whl", hash = "sha256:8e45653fb97eb2f20b8c96f9cd2b3a0654d742b47d638cf2897afbd97f80fa6d"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca57eb3ddaccd1112c18fc80abe41db443cc2e9dcb1917078e02dfa010a4f353"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:c9e0d79ee4c56d841bd4ac6e7697c8ff3c8d6da67379057f29e66acffcd1e9a7"}, - {file = "cryptography-38.0.4-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:0e70da4bdff7601b0ef48e6348339e490ebfb0cbe638e083c9c41fb49f00c8bd"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-macosx_10_10_x86_64.whl", hash = "sha256:998cd19189d8a747b226d24c0207fdaa1e6658a1d3f2494541cb9dfbf7dcb6d2"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67461b5ebca2e4c2ab991733f8ab637a7265bb582f07c7c88914b5afb88cb95b"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4eb85075437f0b1fd8cd66c688469a0c4119e0ba855e3fef86691971b887caf6"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3178d46f363d4549b9a76264f41c6948752183b3f587666aff0555ac50fd7876"}, - {file = "cryptography-38.0.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6391e59ebe7c62d9902c24a4d8bcbc79a68e7c4ab65863536127c8a9cd94043b"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:78e47e28ddc4ace41dd38c42e6feecfdadf9c3be2af389abbfeef1ff06822285"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2fb481682873035600b5502f0015b664abc26466153fab5c6bc92c1ea69d478b"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:4367da5705922cf7070462e964f66e4ac24162e22ab0a2e9d31f1b270dd78083"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b4cad0cea995af760f82820ab4ca54e5471fc782f70a007f31531957f43e9dee"}, - {file = "cryptography-38.0.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:80ca53981ceeb3241998443c4964a387771588c4e4a5d92735a493af868294f9"}, - {file = "cryptography-38.0.4.tar.gz", hash = "sha256:175c1a818b87c9ac80bb7377f5520b7f31b3ef2a0004e2420319beadedb67290"}, + {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"}, + {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"}, + {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"}, + {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"}, + {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"}, + {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"}, + {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"}, + {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"}, + {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"}, + {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"}, + {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"}, + {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"}, + {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"}, + {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"}, + {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"}, + {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"}, + {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"}, + {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"}, + {file = 
"cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"}, + {file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"}, + {file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"}, ] [package.dependencies] cffi = ">=1.12" [package.extras] -docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1)", "sphinx-rtd-theme"] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] -pep8test = ["black", "flake8", "flake8-import-order", "pep8-naming"] +pep8test = ["black", "check-manifest", "mypy", "ruff", "types-pytz", "types-requests"] sdist = ["setuptools-rust (>=0.11.4)"] ssh = ["bcrypt (>=3.1.5)"] -test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] +test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-shard (>=0.1.2)", "pytest-subtests", "pytest-xdist", "pytz"] +test-randomorder = ["pytest-randomly"] +tox = ["tox"] [[package]] name = "defusedxml" From a4126e2861ee9c76d14138c12bd75c83d3e278ee Mon Sep 17 00:00:00 2001 From: Andy Balaam Date: Wed, 8 Feb 2023 12:58:36 +0000 Subject: [PATCH 150/344] Document how to run Synapse (#15022) * Document how to run Synapse * Changelog for 15022 * Update docs/development/contributing_guide.md --- changelog.d/15022.doc | 1 + docs/development/contributing_guide.md | 13 +++++++++++++ 2 files changed, 14 insertions(+) create mode 100644 changelog.d/15022.doc diff --git a/changelog.d/15022.doc b/changelog.d/15022.doc new file mode 100644 index 000000000000..e1627c20cbc9 --- /dev/null +++ b/changelog.d/15022.doc @@ -0,0 +1 @@ +Document how to start Synapse in the contributing guide. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index 36bc88468468..925dcd8933e3 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -78,6 +78,19 @@ poetry install --extras all This will install the runtime and developer dependencies for the project. +## Running Synapse via poetry + +To start a local instance of Synapse in the locked poetry environment, create a config file: + +```sh +cp docs/sample_config.yaml homeserver.yaml +``` + +Now edit homeserver.yaml, and run Synapse with: + +```sh +poetry run python -m synapse.app.homeserver -c homeserver.yaml +``` # 5. Get in touch. From c78c67c5a909c6749f25b251d46be3df8f56f8c2 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 8 Feb 2023 17:41:55 +0100 Subject: [PATCH 151/344] Fix bug in replication where response is cached (#15024) --- changelog.d/15024.bugfix | 1 + synapse/replication/http/_base.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/15024.bugfix diff --git a/changelog.d/15024.bugfix b/changelog.d/15024.bugfix new file mode 100644 index 000000000000..dddd406322ee --- /dev/null +++ b/changelog.d/15024.bugfix @@ -0,0 +1 @@ +Fix bug where retried replication requests would return a failure. Introduced in v1.76.0. 
diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index 908f3f1db7da..c20d9c7e9da7 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -426,6 +426,8 @@ async def _check_auth_and_handle( code, response = await self.response_cache.wrap( txn_id, self._handle_request, request, content, **kwargs ) + # Take a copy so we don't mutate things in the cache. + response = dict(response) else: # The `@cancellable` decorator may be applied to `_handle_request`. But we # told `HttpServer.register_paths` that our handler is `_check_auth_and_handle`, From c951fbedcb81895c199c1f4cfe2251d6c3a7b5f4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 8 Feb 2023 13:09:41 -0500 Subject: [PATCH 152/344] MSC3873: Escape keys when flattening dicts. (#15004) This disambiguates keys which attempt to match fields with a dot in them (e.g. m.relates_to). Disabled by default behind an experimental configuration flag. --- changelog.d/15004.feature | 1 + synapse/config/experimental.py | 5 ++++ synapse/push/bulk_push_rule_evaluator.py | 30 ++++++++++++++++++++---- tests/push/test_push_rule_evaluator.py | 8 +++++++ 4 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15004.feature diff --git a/changelog.d/15004.feature b/changelog.d/15004.feature new file mode 100644 index 000000000000..d11d0aca91da --- /dev/null +++ b/changelog.d/15004.feature @@ -0,0 +1 @@ +Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to unambiguate push rule keys with dots in them. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 53c0682dfdb6..5e3a889081b0 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -169,6 +169,11 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3925: do not replace events with their edits self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False) + # MSC3873: Disambiguate event_match keys. + self.msc3783_escape_event_match_key = experimental.get( + "msc3783_escape_event_match_key", False + ) + # MSC3952: Intentional mentions self.msc3952_intentional_mentions = experimental.get( "msc3952_intentional_mentions", False diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index d9c0a98f44ee..39d2f88f0349 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -271,7 +271,10 @@ async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]: related_event_id, allow_none=True ) if related_event is not None: - related_events[relation_type] = _flatten_dict(related_event) + related_events[relation_type] = _flatten_dict( + related_event, + msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key, + ) reply_event_id = ( event.content.get("m.relates_to", {}) @@ -286,7 +289,10 @@ async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]: ) if related_event is not None: - related_events["m.in_reply_to"] = _flatten_dict(related_event) + related_events["m.in_reply_to"] = _flatten_dict( + related_event, + msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key, + ) # indicate that this is from a fallback relation. 
if relation_type == "m.thread" and event.content.get( @@ -405,7 +411,10 @@ async def _action_for_event_by_user( room_mention = mentions.get("room") is True evaluator = PushRuleEvaluator( - _flatten_dict(event), + _flatten_dict( + event, + msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key, + ), has_mentions, user_mentions, room_mention, @@ -493,6 +502,8 @@ def _flatten_dict( d: Union[EventBase, Mapping[str, Any]], prefix: Optional[List[str]] = None, result: Optional[Dict[str, str]] = None, + *, + msc3783_escape_event_match_key: bool = False, ) -> Dict[str, str]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, @@ -521,11 +532,22 @@ def _flatten_dict( if result is None: result = {} for key, value in d.items(): + if msc3783_escape_event_match_key: + # Escape periods in the key with a backslash (and backslashes with an + # extra backslash). This is since a period is used as a separator between + # nested fields. + key = key.replace("\\", "\\\\").replace(".", "\\.") + if isinstance(value, str): result[".".join(prefix + [key])] = value.lower() elif isinstance(value, Mapping): # do not set `room_version` due to recursion considerations below - _flatten_dict(value, prefix=(prefix + [key]), result=result) + _flatten_dict( + value, + prefix=(prefix + [key]), + result=result, + msc3783_escape_event_match_key=msc3783_escape_event_match_key, + ) # `room_version` should only ever be set when looking at the top level of an event if ( diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index da3342387121..516b65cc3c97 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -48,6 +48,14 @@ def test_nested(self) -> None: input = {"foo": {"bar": "abc"}} self.assertEqual({"foo.bar": "abc"}, _flatten_dict(input)) + # If a field has a dot in it, escape it. + input = {"m.foo": {"b\\ar": "abc"}} + self.assertEqual({"m.foo.b\\ar": "abc"}, _flatten_dict(input)) + self.assertEqual( + {"m\\.foo.b\\\\ar": "abc"}, + _flatten_dict(input, msc3783_escape_event_match_key=True), + ) + def test_non_string(self) -> None: """Non-string items are dropped.""" input: Dict[str, Any] = { From 975f7ba904347094901cb7badf00977b62b9a254 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 8 Feb 2023 13:49:18 -0500 Subject: [PATCH 153/344] Explicit disabling of disallowed_untyped_defs. (#15026) To make it easier to see which files still need to be fixed. --- changelog.d/15026.misc | 1 + mypy.ini | 83 +++++++++++++----------------------------- 2 files changed, 26 insertions(+), 58 deletions(-) create mode 100644 changelog.d/15026.misc diff --git a/changelog.d/15026.misc b/changelog.d/15026.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15026.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index 4598002c4af3..1bdeb18d9496 100644 --- a/mypy.ini +++ b/mypy.ini @@ -60,81 +60,48 @@ disallow_untyped_defs = False [mypy-synapse.storage.database] disallow_untyped_defs = False -[mypy-tests.*] +[mypy-tests.scripts.test_new_matrix_user] disallow_untyped_defs = False -[mypy-tests.api.*] -disallow_untyped_defs = True - -[mypy-tests.app.*] -disallow_untyped_defs = True - -[mypy-tests.appservice.*] -disallow_untyped_defs = True - -[mypy-tests.config.*] -disallow_untyped_defs = True - -[mypy-tests.crypto.*] -disallow_untyped_defs = True - -[mypy-tests.events.*] -disallow_untyped_defs = True - -[mypy-tests.federation.*] -disallow_untyped_defs = True - -[mypy-tests.handlers.*] -disallow_untyped_defs = True - -[mypy-tests.http.*] -disallow_untyped_defs = True - -[mypy-tests.logging.*] -disallow_untyped_defs = True +[mypy-tests.server_notices.test_consent] +disallow_untyped_defs = False -[mypy-tests.metrics.*] -disallow_untyped_defs = True +[mypy-tests.server_notices.test_resource_limits_server_notices] +disallow_untyped_defs = False -[mypy-tests.push.*] -disallow_untyped_defs = True +[mypy-tests.test_distributor] +disallow_untyped_defs = False -[mypy-tests.replication.*] -disallow_untyped_defs = True +[mypy-tests.test_event_auth] +disallow_untyped_defs = False -[mypy-tests.rest.*] -disallow_untyped_defs = True +[mypy-tests.test_federation] +disallow_untyped_defs = False -[mypy-tests.state.test_profile] -disallow_untyped_defs = True +[mypy-tests.test_mau] +disallow_untyped_defs = False -[mypy-tests.storage.*] -disallow_untyped_defs = True +[mypy-tests.test_rust] +disallow_untyped_defs = False -[mypy-tests.test_server] -disallow_untyped_defs = True +[mypy-tests.test_test_utils] +disallow_untyped_defs = False -[mypy-tests.test_state] -disallow_untyped_defs = True +[mypy-tests.test_types] +disallow_untyped_defs = False -[mypy-tests.test_terms_auth] -disallow_untyped_defs = True +[mypy-tests.test_utils.*] +disallow_untyped_defs = False -[mypy-tests.types.*] -disallow_untyped_defs = True +[mypy-tests.test_visibility] +disallow_untyped_defs = False -[mypy-tests.util.caches.*] -disallow_untyped_defs = True +[mypy-tests.unittest] +disallow_untyped_defs = False [mypy-tests.util.caches.test_descriptors] disallow_untyped_defs = False -[mypy-tests.util.*] -disallow_untyped_defs = True - -[mypy-tests.utils] -disallow_untyped_defs = True - ;; Dependencies without annotations ;; Before ignoring a module, check to see if type stubs are available. ;; The `typeshed` project maintains stubs here: From 55e4d27b36fd69a3cf3eceecbd42706579ef2dc7 Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 8 Feb 2023 11:25:11 -0800 Subject: [PATCH 154/344] Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room (#14977) --- changelog.d/14977.misc | 1 + synapse/handlers/message.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14977.misc diff --git a/changelog.d/14977.misc b/changelog.d/14977.misc new file mode 100644 index 000000000000..4d551c52b7db --- /dev/null +++ b/changelog.d/14977.misc @@ -0,0 +1 @@ +Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. 
\ No newline at end of file diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e688e00575f3..5f6da2943fc8 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -499,9 +499,9 @@ def __init__(self, hs: "HomeServer"): self.request_ratelimiter = hs.get_request_ratelimiter() - # We arbitrarily limit concurrent event creation for a room to 5. - # This is to stop us from diverging history *too* much. - self.limiter = Linearizer(max_count=5, name="room_event_creation_limit") + # We limit concurrent event creation for a room to 1. This prevents state resolution + # from occurring when sending bursts of events to a local room + self.limiter = Linearizer(max_count=1, name="room_event_creation_limit") self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator() From 4eed7b2ede2a40aa4ac59eb21d2e13dfffbb6d53 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 8 Feb 2023 14:52:37 -0500 Subject: [PATCH 155/344] Add missing type hints to tests. (#15027) --- changelog.d/15027.misc | 1 + mypy.ini | 18 ------------------ tests/test_distributor.py | 12 ++++++------ tests/test_event_auth.py | 32 +++++++++++++++++--------------- tests/test_mau.py | 35 ++++++++++++++++++++++------------- tests/test_rust.py | 2 +- tests/test_test_utils.py | 16 ++++++++-------- tests/test_types.py | 30 +++++++++++++++--------------- 8 files changed, 70 insertions(+), 76 deletions(-) create mode 100644 changelog.d/15027.misc diff --git a/changelog.d/15027.misc b/changelog.d/15027.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15027.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 1bdeb18d9496..70c106c668af 100644 --- a/mypy.ini +++ b/mypy.ini @@ -69,27 +69,9 @@ disallow_untyped_defs = False [mypy-tests.server_notices.test_resource_limits_server_notices] disallow_untyped_defs = False -[mypy-tests.test_distributor] -disallow_untyped_defs = False - -[mypy-tests.test_event_auth] -disallow_untyped_defs = False - [mypy-tests.test_federation] disallow_untyped_defs = False -[mypy-tests.test_mau] -disallow_untyped_defs = False - -[mypy-tests.test_rust] -disallow_untyped_defs = False - -[mypy-tests.test_test_utils] -disallow_untyped_defs = False - -[mypy-tests.test_types] -disallow_untyped_defs = False - [mypy-tests.test_utils.*] disallow_untyped_defs = False diff --git a/tests/test_distributor.py b/tests/test_distributor.py index 31546ea52bb0..a248f1d27711 100644 --- a/tests/test_distributor.py +++ b/tests/test_distributor.py @@ -21,10 +21,10 @@ class DistributorTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.dist = Distributor() - def test_signal_dispatch(self): + def test_signal_dispatch(self) -> None: self.dist.declare("alert") observer = Mock() @@ -33,7 +33,7 @@ def test_signal_dispatch(self): self.dist.fire("alert", 1, 2, 3) observer.assert_called_with(1, 2, 3) - def test_signal_catch(self): + def test_signal_catch(self) -> None: self.dist.declare("alarm") observers = [Mock() for i in (1, 2)] @@ -51,7 +51,7 @@ def test_signal_catch(self): self.assertEqual(mock_logger.warning.call_count, 1) self.assertIsInstance(mock_logger.warning.call_args[0][0], str) - def test_signal_prereg(self): + def test_signal_prereg(self) -> None: observer = Mock() self.dist.observe("flare", observer) @@ -60,8 +60,8 @@ def test_signal_prereg(self): observer.assert_called_with(4, 5) - def test_signal_undeclared(self): - def code(): + def test_signal_undeclared(self) -> None: + def code() -> None: 
self.dist.fire("notification") self.assertRaises(KeyError, code) diff --git a/tests/test_event_auth.py b/tests/test_event_auth.py index 0a7937f1cc72..2860564afc45 100644 --- a/tests/test_event_auth.py +++ b/tests/test_event_auth.py @@ -31,13 +31,13 @@ class _StubEventSourceStore: """A stub implementation of the EventSourceStore""" - def __init__(self): + def __init__(self) -> None: self._store: Dict[str, EventBase] = {} - def add_event(self, event: EventBase): + def add_event(self, event: EventBase) -> None: self._store[event.event_id] = event - def add_events(self, events: Iterable[EventBase]): + def add_events(self, events: Iterable[EventBase]) -> None: for event in events: self._store[event.event_id] = event @@ -59,7 +59,7 @@ async def get_events( class EventAuthTestCase(unittest.TestCase): - def test_rejected_auth_events(self): + def test_rejected_auth_events(self) -> None: """ Events that refer to rejected events in their auth events are rejected """ @@ -109,7 +109,7 @@ def test_rejected_auth_events(self): ) ) - def test_create_event_with_prev_events(self): + def test_create_event_with_prev_events(self) -> None: """A create event with prev_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules @@ -150,7 +150,7 @@ def test_create_event_with_prev_events(self): event_auth.check_state_independent_auth_rules(event_store, bad_event) ) - def test_duplicate_auth_events(self): + def test_duplicate_auth_events(self) -> None: """Events with duplicate auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules @@ -196,7 +196,7 @@ def test_duplicate_auth_events(self): event_auth.check_state_independent_auth_rules(event_store, bad_event2) ) - def test_unexpected_auth_events(self): + def test_unexpected_auth_events(self) -> None: """Events with excess auth_events should be rejected https://spec.matrix.org/v1.3/rooms/v9/#authorization-rules @@ -236,7 +236,7 @@ def test_unexpected_auth_events(self): event_auth.check_state_independent_auth_rules(event_store, bad_event) ) - def test_random_users_cannot_send_state_before_first_pl(self): + def test_random_users_cannot_send_state_before_first_pl(self) -> None: """ Check that, before the first PL lands, the creator is the only user that can send a state event. @@ -263,7 +263,7 @@ def test_random_users_cannot_send_state_before_first_pl(self): auth_events, ) - def test_state_default_level(self): + def test_state_default_level(self) -> None: """ Check that users above the state_default level can send state and those below cannot @@ -298,7 +298,7 @@ def test_state_default_level(self): auth_events, ) - def test_alias_event(self): + def test_alias_event(self) -> None: """Alias events have special behavior up through room version 6.""" creator = "@creator:example.com" other = "@other:example.com" @@ -333,7 +333,7 @@ def test_alias_event(self): auth_events, ) - def test_msc2432_alias_event(self): + def test_msc2432_alias_event(self) -> None: """After MSC2432, alias events have no special behavior.""" creator = "@creator:example.com" other = "@other:example.com" @@ -366,7 +366,9 @@ def test_msc2432_alias_event(self): ) @parameterized.expand([(RoomVersions.V1, True), (RoomVersions.V6, False)]) - def test_notifications(self, room_version: RoomVersion, allow_modification: bool): + def test_notifications( + self, room_version: RoomVersion, allow_modification: bool + ) -> None: """ Notifications power levels get checked due to MSC2209. 
""" @@ -395,7 +397,7 @@ def test_notifications(self, room_version: RoomVersion, allow_modification: bool with self.assertRaises(AuthError): event_auth.check_state_dependent_auth_rules(pl_event, auth_events) - def test_join_rules_public(self): + def test_join_rules_public(self) -> None: """ Test joining a public room. """ @@ -460,7 +462,7 @@ def test_join_rules_public(self): auth_events.values(), ) - def test_join_rules_invite(self): + def test_join_rules_invite(self) -> None: """ Test joining an invite only room. """ @@ -835,7 +837,7 @@ def _power_levels_event( ) -def _alias_event(room_version: RoomVersion, sender: str, **kwargs) -> EventBase: +def _alias_event(room_version: RoomVersion, sender: str, **kwargs: Any) -> EventBase: data = { "room_id": TEST_ROOM_ID, **_maybe_get_event_id_dict_for_room_version(room_version), diff --git a/tests/test_mau.py b/tests/test_mau.py index f14fcb7db9ba..4e7665a22b93 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -14,12 +14,17 @@ """Tests REST events for /rooms paths.""" -from typing import List +from typing import List, Optional + +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import APP_SERVICE_REGISTRATION_TYPE, LoginType from synapse.api.errors import Codes, HttpResponseException, SynapseError from synapse.appservice import ApplicationService from synapse.rest.client import register, sync +from synapse.server import HomeServer +from synapse.types import JsonDict +from synapse.util import Clock from tests import unittest from tests.unittest import override_config @@ -30,7 +35,7 @@ class TestMauLimit(unittest.HomeserverTestCase): servlets = [register.register_servlets, sync.register_servlets] - def default_config(self): + def default_config(self) -> JsonDict: config = default_config("test") config.update( @@ -53,10 +58,12 @@ def default_config(self): return config - def prepare(self, reactor, clock, homeserver): + def prepare( + self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer + ) -> None: self.store = homeserver.get_datastores().main - def test_simple_deny_mau(self): + def test_simple_deny_mau(self) -> None: # Create and sync so that the MAU counts get updated token1 = self.create_user("kermit1") self.do_sync_for_user(token1) @@ -75,7 +82,7 @@ def test_simple_deny_mau(self): self.assertEqual(e.code, 403) self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) - def test_as_ignores_mau(self): + def test_as_ignores_mau(self) -> None: """Test that application services can still create users when the MAU limit has been reached. This only works when application service user ip tracking is disabled. 
@@ -113,7 +120,7 @@ def test_as_ignores_mau(self): self.create_user("as_kermit4", token=as_token, appservice=True) - def test_allowed_after_a_month_mau(self): + def test_allowed_after_a_month_mau(self) -> None: # Create and sync so that the MAU counts get updated token1 = self.create_user("kermit1") self.do_sync_for_user(token1) @@ -132,7 +139,7 @@ def test_allowed_after_a_month_mau(self): self.do_sync_for_user(token3) @override_config({"mau_trial_days": 1}) - def test_trial_delay(self): + def test_trial_delay(self) -> None: # We should be able to register more than the limit initially token1 = self.create_user("kermit1") self.do_sync_for_user(token1) @@ -165,7 +172,7 @@ def test_trial_delay(self): self.assertEqual(e.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) @override_config({"mau_trial_days": 1}) - def test_trial_users_cant_come_back(self): + def test_trial_users_cant_come_back(self) -> None: self.hs.config.server.mau_trial_days = 1 # We should be able to register more than the limit initially @@ -216,7 +223,7 @@ def test_trial_users_cant_come_back(self): # max_mau_value should not matter {"max_mau_value": 1, "limit_usage_by_mau": False, "mau_stats_only": True} ) - def test_tracked_but_not_limited(self): + def test_tracked_but_not_limited(self) -> None: # Simply being able to create 2 users indicates that the # limit was not reached. token1 = self.create_user("kermit1") @@ -236,10 +243,10 @@ def test_tracked_but_not_limited(self): "mau_appservice_trial_days": {"SomeASID": 1, "AnotherASID": 2}, } ) - def test_as_trial_days(self): + def test_as_trial_days(self) -> None: user_tokens: List[str] = [] - def advance_time_and_sync(): + def advance_time_and_sync() -> None: self.reactor.advance(24 * 60 * 61) for token in user_tokens: self.do_sync_for_user(token) @@ -300,7 +307,9 @@ def advance_time_and_sync(): }, ) - def create_user(self, localpart, token=None, appservice=False): + def create_user( + self, localpart: str, token: Optional[str] = None, appservice: bool = False + ) -> str: request_data = { "username": localpart, "password": "monkey", @@ -326,7 +335,7 @@ def create_user(self, localpart, token=None, appservice=False): return access_token - def do_sync_for_user(self, token): + def do_sync_for_user(self, token: str) -> None: channel = self.make_request("GET", "/sync", access_token=token) if channel.code != 200: diff --git a/tests/test_rust.py b/tests/test_rust.py index 55d8b6b28cb4..67443b628042 100644 --- a/tests/test_rust.py +++ b/tests/test_rust.py @@ -6,6 +6,6 @@ class RustTestCase(unittest.TestCase): """Basic tests to ensure that we can call into Rust code.""" - def test_basic(self): + def test_basic(self) -> None: result = sum_as_string(1, 2) self.assertEqual("3", result) diff --git a/tests/test_test_utils.py b/tests/test_test_utils.py index d04bcae0fabe..5cd698147e12 100644 --- a/tests/test_test_utils.py +++ b/tests/test_test_utils.py @@ -17,25 +17,25 @@ class MockClockTestCase(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.clock = MockClock() - def test_advance_time(self): + def test_advance_time(self) -> None: start_time = self.clock.time() self.clock.advance_time(20) self.assertEqual(20, self.clock.time() - start_time) - def test_later(self): + def test_later(self) -> None: invoked = [0, 0] - def _cb0(): + def _cb0() -> None: invoked[0] = 1 self.clock.call_later(10, _cb0) - def _cb1(): + def _cb1() -> None: invoked[1] = 1 self.clock.call_later(20, _cb1) @@ -51,15 +51,15 @@ def _cb1(): self.assertTrue(invoked[1]) - def test_cancel_later(self): + def 
test_cancel_later(self) -> None: invoked = [0, 0] - def _cb0(): + def _cb0() -> None: invoked[0] = 1 t0 = self.clock.call_later(10, _cb0) - def _cb1(): + def _cb1() -> None: invoked[1] = 1 self.clock.call_later(20, _cb1) diff --git a/tests/test_types.py b/tests/test_types.py index 111116938423..c491cc9a9661 100644 --- a/tests/test_types.py +++ b/tests/test_types.py @@ -43,34 +43,34 @@ def test_two_colons(self) -> None: class UserIDTestCase(unittest.HomeserverTestCase): - def test_parse(self): + def test_parse(self) -> None: user = UserID.from_string("@1234abcd:test") self.assertEqual("1234abcd", user.localpart) self.assertEqual("test", user.domain) self.assertEqual(True, self.hs.is_mine(user)) - def test_parse_rejects_empty_id(self): + def test_parse_rejects_empty_id(self) -> None: with self.assertRaises(SynapseError): UserID.from_string("") - def test_parse_rejects_missing_sigil(self): + def test_parse_rejects_missing_sigil(self) -> None: with self.assertRaises(SynapseError): UserID.from_string("alice:example.com") - def test_parse_rejects_missing_separator(self): + def test_parse_rejects_missing_separator(self) -> None: with self.assertRaises(SynapseError): UserID.from_string("@alice.example.com") - def test_validation_rejects_missing_domain(self): + def test_validation_rejects_missing_domain(self) -> None: self.assertFalse(UserID.is_valid("@alice:")) - def test_build(self): + def test_build(self) -> None: user = UserID("5678efgh", "my.domain") self.assertEqual(user.to_string(), "@5678efgh:my.domain") - def test_compare(self): + def test_compare(self) -> None: userA = UserID.from_string("@userA:my.domain") userAagain = UserID.from_string("@userA:my.domain") userB = UserID.from_string("@userB:my.domain") @@ -80,43 +80,43 @@ def test_compare(self): class RoomAliasTestCase(unittest.HomeserverTestCase): - def test_parse(self): + def test_parse(self) -> None: room = RoomAlias.from_string("#channel:test") self.assertEqual("channel", room.localpart) self.assertEqual("test", room.domain) self.assertEqual(True, self.hs.is_mine(room)) - def test_build(self): + def test_build(self) -> None: room = RoomAlias("channel", "my.domain") self.assertEqual(room.to_string(), "#channel:my.domain") - def test_validate(self): + def test_validate(self) -> None: id_string = "#test:domain,test" self.assertFalse(RoomAlias.is_valid(id_string)) class MapUsernameTestCase(unittest.TestCase): - def testPassThrough(self): + def test_pass_througuh(self) -> None: self.assertEqual(map_username_to_mxid_localpart("test1234"), "test1234") - def testUpperCase(self): + def test_upper_case(self) -> None: self.assertEqual(map_username_to_mxid_localpart("tEST_1234"), "test_1234") self.assertEqual( map_username_to_mxid_localpart("tEST_1234", case_sensitive=True), "t_e_s_t__1234", ) - def testSymbols(self): + def test_symbols(self) -> None: self.assertEqual( map_username_to_mxid_localpart("test=$?_1234"), "test=3d=24=3f_1234" ) - def testLeadingUnderscore(self): + def test_leading_underscore(self) -> None: self.assertEqual(map_username_to_mxid_localpart("_test_1234"), "=5ftest_1234") - def testNonAscii(self): + def test_non_ascii(self) -> None: # this should work with either a unicode or a bytes self.assertEqual(map_username_to_mxid_localpart("têst"), "t=c3=aast") self.assertEqual(map_username_to_mxid_localpart("têst".encode()), "t=c3=aast") From 30509a1010f10bc7924146cac57571c4b24914d7 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 8 Feb 2023 16:29:49 -0500 Subject: [PATCH 156/344] Add more missing type hints to tests. 
(#15028) --- changelog.d/15028.misc | 1 + mypy.ini | 18 ----- tests/handlers/test_oidc.py | 4 +- tests/scripts/test_new_matrix_user.py | 25 +++--- tests/server_notices/test_consent.py | 14 ++-- .../test_resource_limits_server_notices.py | 35 ++++---- tests/test_federation.py | 80 ++++++++++--------- tests/test_utils/__init__.py | 26 +++--- tests/test_utils/event_injection.py | 8 +- tests/test_utils/html_parsers.py | 6 +- tests/test_utils/logging_setup.py | 4 +- tests/test_utils/oidc.py | 10 +-- tests/test_visibility.py | 2 +- tests/unittest.py | 2 +- 14 files changed, 124 insertions(+), 111 deletions(-) create mode 100644 changelog.d/15028.misc diff --git a/changelog.d/15028.misc b/changelog.d/15028.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15028.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 70c106c668af..0e5c6ccf6121 100644 --- a/mypy.ini +++ b/mypy.ini @@ -60,24 +60,6 @@ disallow_untyped_defs = False [mypy-synapse.storage.database] disallow_untyped_defs = False -[mypy-tests.scripts.test_new_matrix_user] -disallow_untyped_defs = False - -[mypy-tests.server_notices.test_consent] -disallow_untyped_defs = False - -[mypy-tests.server_notices.test_resource_limits_server_notices] -disallow_untyped_defs = False - -[mypy-tests.test_federation] -disallow_untyped_defs = False - -[mypy-tests.test_utils.*] -disallow_untyped_defs = False - -[mypy-tests.test_visibility] -disallow_untyped_defs = False - [mypy-tests.unittest] disallow_untyped_defs = False diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index adddbd002f50..951caaa6b3ba 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -150,7 +150,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: hs = self.setup_test_homeserver() self.hs_patcher = self.fake_server.patch_homeserver(hs=hs) - self.hs_patcher.start() + self.hs_patcher.start() # type: ignore[attr-defined] self.handler = hs.get_oidc_handler() self.provider = self.handler._providers["oidc"] @@ -170,7 +170,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return hs def tearDown(self) -> None: - self.hs_patcher.stop() + self.hs_patcher.stop() # type: ignore[attr-defined] return super().tearDown() def reset_mocks(self) -> None: diff --git a/tests/scripts/test_new_matrix_user.py b/tests/scripts/test_new_matrix_user.py index 22f99c6ab1ce..3285f2433ccf 100644 --- a/tests/scripts/test_new_matrix_user.py +++ b/tests/scripts/test_new_matrix_user.py @@ -12,29 +12,33 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from typing import List, Optional from unittest.mock import Mock, patch from synapse._scripts.register_new_matrix_user import request_registration +from synapse.types import JsonDict from tests.unittest import TestCase class RegisterTestCase(TestCase): - def test_success(self): + def test_success(self) -> None: """ The script will fetch a nonce, and then generate a MAC with it, and then post that MAC. 
""" - def get(url, verify=None): + def get(url: str, verify: Optional[bool] = None) -> Mock: r = Mock() r.status_code = 200 r.json = lambda: {"nonce": "a"} return r - def post(url, json=None, verify=None): + def post( + url: str, json: Optional[JsonDict] = None, verify: Optional[bool] = None + ) -> Mock: # Make sure we are sent the correct info + assert json is not None self.assertEqual(json["username"], "user") self.assertEqual(json["password"], "pass") self.assertEqual(json["nonce"], "a") @@ -70,12 +74,12 @@ def post(url, json=None, verify=None): # sys.exit shouldn't have been called. self.assertEqual(err_code, []) - def test_failure_nonce(self): + def test_failure_nonce(self) -> None: """ If the script fails to fetch a nonce, it throws an error and quits. """ - def get(url, verify=None): + def get(url: str, verify: Optional[bool] = None) -> Mock: r = Mock() r.status_code = 404 r.reason = "Not Found" @@ -107,20 +111,23 @@ def get(url, verify=None): self.assertIn("ERROR! Received 404 Not Found", out) self.assertNotIn("Success!", out) - def test_failure_post(self): + def test_failure_post(self) -> None: """ The script will fetch a nonce, and then if the final POST fails, will report an error and quit. """ - def get(url, verify=None): + def get(url: str, verify: Optional[bool] = None) -> Mock: r = Mock() r.status_code = 200 r.json = lambda: {"nonce": "a"} return r - def post(url, json=None, verify=None): + def post( + url: str, json: Optional[JsonDict] = None, verify: Optional[bool] = None + ) -> Mock: # Make sure we are sent the correct info + assert json is not None self.assertEqual(json["username"], "user") self.assertEqual(json["password"], "pass") self.assertEqual(json["nonce"], "a") diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py index 58b399a04377..6540ed53f173 100644 --- a/tests/server_notices/test_consent.py +++ b/tests/server_notices/test_consent.py @@ -14,8 +14,12 @@ import os +from twisted.test.proto_helpers import MemoryReactor + import synapse.rest.admin from synapse.rest.client import login, room, sync +from synapse.server import HomeServer +from synapse.util import Clock from tests import unittest @@ -29,7 +33,7 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): room.register_servlets, ] - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: tmpdir = self.mktemp() os.mkdir(tmpdir) @@ -53,15 +57,13 @@ def make_homeserver(self, reactor, clock): "room_name": "Server Notices", } - hs = self.setup_test_homeserver(config=config) - - return hs + return self.setup_test_homeserver(config=config) - def prepare(self, reactor, clock, hs): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = self.register_user("bob", "abc123") self.access_token = self.login("bob", "abc123") - def test_get_sync_message(self): + def test_get_sync_message(self) -> None: """ When user consent server notices are enabled, a sync will cause a notice to fire (in a room which the user is invited to). 
The notice contains diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index dadc6efcbf75..5b76383d760a 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -24,6 +24,7 @@ from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, ) +from synapse.types import JsonDict from synapse.util import Clock from tests import unittest @@ -33,7 +34,7 @@ class TestResourceLimitsServerNotices(unittest.HomeserverTestCase): - def default_config(self): + def default_config(self) -> JsonDict: config = default_config("test") config.update( @@ -86,18 +87,18 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self._rlsn._store.get_tags_for_room = Mock(return_value=make_awaitable({})) # type: ignore[assignment] @override_config({"hs_disabled": True}) - def test_maybe_send_server_notice_disabled_hs(self): + def test_maybe_send_server_notice_disabled_hs(self) -> None: """If the HS is disabled, we should not send notices""" self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) self._send_notice.assert_not_called() @override_config({"limit_usage_by_mau": False}) - def test_maybe_send_server_notice_to_user_flag_off(self): + def test_maybe_send_server_notice_to_user_flag_off(self) -> None: """If mau limiting is disabled, we should not send notices""" self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) self._send_notice.assert_not_called() - def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): + def test_maybe_send_server_notice_to_user_remove_blocked_notice(self) -> None: """Test when user has blocked notice, but should have it removed""" self._rlsn._auth_blocking.check_auth_blocking = Mock( @@ -114,7 +115,7 @@ def test_maybe_send_server_notice_to_user_remove_blocked_notice(self): self._rlsn._server_notices_manager.maybe_get_notice_room_for_user.assert_called_once() self._send_notice.assert_called_once() - def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): + def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self) -> None: """ Test when user has blocked notice, but notice ought to be there (NOOP) """ @@ -134,7 +135,7 @@ def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self): self._send_notice.assert_not_called() - def test_maybe_send_server_notice_to_user_add_blocked_notice(self): + def test_maybe_send_server_notice_to_user_add_blocked_notice(self) -> None: """ Test when user does not have blocked notice, but should have one """ @@ -147,7 +148,7 @@ def test_maybe_send_server_notice_to_user_add_blocked_notice(self): # Would be better to check contents, but 2 calls == set blocking event self.assertEqual(self._send_notice.call_count, 2) - def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self): + def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self) -> None: """ Test when user does not have blocked notice, nor should they (NOOP) """ @@ -159,7 +160,7 @@ def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self): self._send_notice.assert_not_called() - def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self): + def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self) -> None: """ Test when user is not part of the MAU cohort - this should not ever happen - but ... 
@@ -175,7 +176,9 @@ def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self): self._send_notice.assert_not_called() @override_config({"mau_limit_alerting": False}) - def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked(self): + def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked( + self, + ) -> None: """ Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room @@ -191,7 +194,7 @@ def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked(self): self.assertEqual(self._send_notice.call_count, 0) @override_config({"mau_limit_alerting": False}) - def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self): + def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self) -> None: """ Test that when a server is disabled, that MAU limit alerting is ignored. """ @@ -207,7 +210,9 @@ def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self): self.assertEqual(self._send_notice.call_count, 2) @override_config({"mau_limit_alerting": False}) - def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked(self): + def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked( + self, + ) -> None: """ When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. @@ -242,7 +247,7 @@ class TestResourceLimitsServerNoticesWithRealRooms(unittest.HomeserverTestCase): sync.register_servlets, ] - def default_config(self): + def default_config(self) -> JsonDict: c = super().default_config() c["server_notices"] = { "system_mxid_localpart": "server", @@ -270,7 +275,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.user_id = "@user_id:test" - def test_server_notice_only_sent_once(self): + def test_server_notice_only_sent_once(self) -> None: self.store.get_monthly_active_count = Mock(return_value=make_awaitable(1000)) self.store.user_last_seen_monthly_active = Mock( @@ -306,7 +311,7 @@ def test_server_notice_only_sent_once(self): self.assertEqual(count, 1) - def test_no_invite_without_notice(self): + def test_no_invite_without_notice(self) -> None: """Tests that a user doesn't get invited to a server notices room without a server notice being sent. @@ -328,7 +333,7 @@ def test_no_invite_without_notice(self): m.assert_called_once_with(user_id) - def test_invite_with_notice(self): + def test_invite_with_notice(self) -> None: """Tests that, if the MAU limit is hit, the server notices user invites each user to a room in which it has sent a notice. """ diff --git a/tests/test_federation.py b/tests/test_federation.py index 80e5c590d836..ddb43c8c981a 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -12,53 +12,48 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Optional, Union from unittest.mock import Mock from twisted.internet.defer import succeed +from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import FederationError from synapse.api.room_versions import RoomVersions -from synapse.events import make_event_from_dict +from synapse.events import EventBase, make_event_from_dict +from synapse.events.snapshot import EventContext from synapse.federation.federation_base import event_from_pdu_json +from synapse.http.types import QueryParams from synapse.logging.context import LoggingContext -from synapse.types import UserID, create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock from synapse.util.retryutils import NotRetryingDestination from tests import unittest -from tests.server import ThreadedMemoryReactorClock, setup_test_homeserver from tests.test_utils import make_awaitable class MessageAcceptTests(unittest.HomeserverTestCase): - def setUp(self): - + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: self.http_client = Mock() - self.reactor = ThreadedMemoryReactorClock() - self.hs_clock = Clock(self.reactor) - self.homeserver = setup_test_homeserver( - self.addCleanup, - federation_http_client=self.http_client, - clock=self.hs_clock, - reactor=self.reactor, - ) + return self.setup_test_homeserver(federation_http_client=self.http_client) + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: user_id = UserID("us", "test") our_user = create_requester(user_id) - room_creator = self.homeserver.get_room_creation_handler() + room_creator = self.hs.get_room_creation_handler() self.room_id = self.get_success( room_creator.create_room( our_user, room_creator._presets_dict["public_chat"], ratelimit=False ) )[0]["room_id"] - self.store = self.homeserver.get_datastores().main + self.store = self.hs.get_datastores().main # Figure out what the most recent event is most_recent = self.get_success( - self.homeserver.get_datastores().main.get_latest_event_ids_in_room( - self.room_id - ) + self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) )[0] join_event = make_event_from_dict( @@ -78,14 +73,16 @@ def setUp(self): } ) - self.handler = self.homeserver.get_federation_handler() - federation_event_handler = self.homeserver.get_federation_event_handler() + self.handler = self.hs.get_federation_handler() + federation_event_handler = self.hs.get_federation_event_handler() - async def _check_event_auth(origin, event, context): + async def _check_event_auth( + origin: Optional[str], event: EventBase, context: EventContext + ) -> None: pass federation_event_handler._check_event_auth = _check_event_auth - self.client = self.homeserver.get_federation_client() + self.client = self.hs.get_federation_client() self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( lambda dest, pdus, **k: succeed(pdus) ) @@ -104,16 +101,25 @@ async def _check_event_auth(origin, event, context): "$join:test.serv", ) - def test_cant_hide_direct_ancestors(self): + def test_cant_hide_direct_ancestors(self) -> None: """ If you send a message, you must be able to provide the direct prev_events that said event references. 
""" - async def post_json(destination, path, data, headers=None, timeout=0): + async def post_json( + destination: str, + path: str, + data: Optional[JsonDict] = None, + long_retries: bool = False, + timeout: Optional[int] = None, + ignore_backoff: bool = False, + args: Optional[QueryParams] = None, + ) -> Union[JsonDict, list]: # If it asks us for new missing events, give them NOTHING if path.startswith("/_matrix/federation/v1/get_missing_events/"): return {"events": []} + return {} self.http_client.post_json = post_json @@ -138,7 +144,7 @@ async def post_json(destination, path, data, headers=None, timeout=0): } ) - federation_event_handler = self.homeserver.get_federation_event_handler() + federation_event_handler = self.hs.get_federation_event_handler() with LoggingContext("test-context"): failure = self.get_failure( federation_event_handler.on_receive_pdu("test.serv", lying_event), @@ -158,7 +164,7 @@ async def post_json(destination, path, data, headers=None, timeout=0): extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id)) self.assertEqual(extrem[0], "$join:test.serv") - def test_retry_device_list_resync(self): + def test_retry_device_list_resync(self) -> None: """Tests that device lists are marked as stale if they couldn't be synced, and that stale device lists are retried periodically. """ @@ -171,24 +177,26 @@ def test_retry_device_list_resync(self): # When this function is called, increment the number of resync attempts (only if # we're querying devices for the right user ID), then raise a # NotRetryingDestination error to fail the resync gracefully. - def query_user_devices(destination, user_id): + def query_user_devices( + destination: str, user_id: str, timeout: int = 30000 + ) -> JsonDict: if user_id == remote_user_id: self.resync_attempts += 1 raise NotRetryingDestination(0, 0, destination) # Register the mock on the federation client. - federation_client = self.homeserver.get_federation_client() + federation_client = self.hs.get_federation_client() federation_client.query_user_devices = Mock(side_effect=query_user_devices) # Register a mock on the store so that the incoming update doesn't fail because # we don't share a room with the user. - store = self.homeserver.get_datastores().main + store = self.hs.get_datastores().main store.get_rooms_for_user = Mock(return_value=make_awaitable(["!someroom:test"])) # Manually inject a fake device list update. We need this update to include at # least one prev_id so that the user's device list will need to be retried. - device_list_updater = self.homeserver.get_device_handler().device_list_updater + device_list_updater = self.hs.get_device_handler().device_list_updater self.get_success( device_list_updater.incoming_device_list_update( origin=remote_origin, @@ -218,7 +226,7 @@ def query_user_devices(destination, user_id): self.reactor.advance(30) self.assertEqual(self.resync_attempts, 2) - def test_cross_signing_keys_retry(self): + def test_cross_signing_keys_retry(self) -> None: """Tests that resyncing a device list correctly processes cross-signing keys from the remote server. """ @@ -227,7 +235,7 @@ def test_cross_signing_keys_retry(self): remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" # Register mock device list retrieval on the federation client. 
- federation_client = self.homeserver.get_federation_client() + federation_client = self.hs.get_federation_client() federation_client.query_user_devices = Mock( return_value=make_awaitable( { @@ -252,7 +260,7 @@ def test_cross_signing_keys_retry(self): ) # Resync the device list. - device_handler = self.homeserver.get_device_handler() + device_handler = self.hs.get_device_handler() self.get_success( device_handler.device_list_updater.user_device_resync(remote_user_id), ) @@ -279,7 +287,7 @@ def test_cross_signing_keys_retry(self): class StripUnsignedFromEventsTestCase(unittest.TestCase): - def test_strip_unauthorized_unsigned_values(self): + def test_strip_unauthorized_unsigned_values(self) -> None: event1 = { "sender": "@baduser:test.serv", "state_key": "@baduser:test.serv", @@ -296,7 +304,7 @@ def test_strip_unauthorized_unsigned_values(self): # Make sure unauthorized fields are stripped from unsigned self.assertNotIn("more warez", filtered_event.unsigned) - def test_strip_event_maintains_allowed_fields(self): + def test_strip_event_maintains_allowed_fields(self) -> None: event2 = { "sender": "@baduser:test.serv", "state_key": "@baduser:test.serv", @@ -323,7 +331,7 @@ def test_strip_event_maintains_allowed_fields(self): self.assertIn("invite_room_state", filtered_event2.unsigned) self.assertEqual([], filtered_event2.unsigned["invite_room_state"]) - def test_strip_event_removes_fields_based_on_event_type(self): + def test_strip_event_removes_fields_based_on_event_type(self) -> None: event3 = { "sender": "@baduser:test.serv", "state_key": "@baduser:test.serv", diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index e62ebcc6a5a3..e5dae670a70e 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -20,12 +20,13 @@ import warnings from asyncio import Future from binascii import unhexlify -from typing import Awaitable, Callable, Tuple, TypeVar +from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, TypeVar from unittest.mock import Mock import attr import zope.interface +from twisted.internet.interfaces import IProtocol from twisted.python.failure import Failure from twisted.web.client import ResponseDone from twisted.web.http import RESPONSES @@ -34,6 +35,9 @@ from synapse.types import JsonDict +if TYPE_CHECKING: + from sys import UnraisableHookArgs + TV = TypeVar("TV") @@ -78,25 +82,29 @@ def setup_awaitable_errors() -> Callable[[], None]: unraisable_exceptions = [] orig_unraisablehook = sys.unraisablehook - def unraisablehook(unraisable): + def unraisablehook(unraisable: "UnraisableHookArgs") -> None: unraisable_exceptions.append(unraisable.exc_value) - def cleanup(): + def cleanup() -> None: """ A method to be used as a clean-up that fails a test-case if there are any new unraisable exceptions. 
""" sys.unraisablehook = orig_unraisablehook if unraisable_exceptions: - raise unraisable_exceptions.pop() + exc = unraisable_exceptions.pop() + assert exc is not None + raise exc sys.unraisablehook = unraisablehook return cleanup -def simple_async_mock(return_value=None, raises=None) -> Mock: +def simple_async_mock( + return_value: Optional[TV] = None, raises: Optional[Exception] = None +) -> Mock: # AsyncMock is not available in python3.5, this mimics part of its behaviour - async def cb(*args, **kwargs): + async def cb(*args: Any, **kwargs: Any) -> Optional[TV]: if raises: raise raises return return_value @@ -125,14 +133,14 @@ class FakeResponse: # type: ignore[misc] headers: Headers = attr.Factory(Headers) @property - def phrase(self): + def phrase(self) -> bytes: return RESPONSES.get(self.code, b"Unknown Status") @property - def length(self): + def length(self) -> int: return len(self.body) - def deliverBody(self, protocol): + def deliverBody(self, protocol: IProtocol) -> None: protocol.dataReceived(self.body) protocol.connectionLost(Failure(ResponseDone())) diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index 8027c7a856e2..1a50c2acf12a 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -12,7 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Optional, Tuple +from typing import Any, List, Optional, Tuple import synapse.server from synapse.api.constants import EventTypes @@ -32,7 +32,7 @@ async def inject_member_event( membership: str, target: Optional[str] = None, extra_content: Optional[dict] = None, - **kwargs, + **kwargs: Any, ) -> EventBase: """Inject a membership event into a room.""" if target is None: @@ -57,7 +57,7 @@ async def inject_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs, + **kwargs: Any, ) -> EventBase: """Inject a generic event into a room @@ -82,7 +82,7 @@ async def create_event( hs: synapse.server.HomeServer, room_version: Optional[str] = None, prev_event_ids: Optional[List[str]] = None, - **kwargs, + **kwargs: Any, ) -> Tuple[EventBase, EventContext]: if room_version is None: room_version = await hs.get_datastores().main.get_room_version_id( diff --git a/tests/test_utils/html_parsers.py b/tests/test_utils/html_parsers.py index e878af5f12e7..189c697efbee 100644 --- a/tests/test_utils/html_parsers.py +++ b/tests/test_utils/html_parsers.py @@ -13,13 +13,13 @@ # limitations under the License. 
from html.parser import HTMLParser -from typing import Dict, Iterable, List, Optional, Tuple +from typing import Dict, Iterable, List, NoReturn, Optional, Tuple class TestHtmlParser(HTMLParser): """A generic HTML page parser which extracts useful things from the HTML""" - def __init__(self): + def __init__(self) -> None: super().__init__() # a list of links found in the doc @@ -48,5 +48,5 @@ def handle_starttag( assert input_name self.hiddens[input_name] = attr_dict["value"] - def error(_, message): + def error(self, message: str) -> NoReturn: raise AssertionError(message) diff --git a/tests/test_utils/logging_setup.py b/tests/test_utils/logging_setup.py index 304c7b98c5c9..b522163a3444 100644 --- a/tests/test_utils/logging_setup.py +++ b/tests/test_utils/logging_setup.py @@ -25,7 +25,7 @@ class ToTwistedHandler(logging.Handler): tx_log = twisted.logger.Logger() - def emit(self, record): + def emit(self, record: logging.LogRecord) -> None: log_entry = self.format(record) log_level = record.levelname.lower().replace("warning", "warn") self.tx_log.emit( @@ -33,7 +33,7 @@ def emit(self, record): ) -def setup_logging(): +def setup_logging() -> None: """Configure the python logging appropriately for the tests. (Logs will end up in _trial_temp.) diff --git a/tests/test_utils/oidc.py b/tests/test_utils/oidc.py index 1461d23ee823..d555b242555d 100644 --- a/tests/test_utils/oidc.py +++ b/tests/test_utils/oidc.py @@ -14,7 +14,7 @@ import json -from typing import Any, Dict, List, Optional, Tuple +from typing import Any, ContextManager, Dict, List, Optional, Tuple from unittest.mock import Mock, patch from urllib.parse import parse_qs @@ -77,14 +77,14 @@ def __init__(self, clock: Clock, issuer: str): self._id_token_overrides: Dict[str, Any] = {} - def reset_mocks(self): + def reset_mocks(self) -> None: self.request.reset_mock() self.get_jwks_handler.reset_mock() self.get_metadata_handler.reset_mock() self.get_userinfo_handler.reset_mock() self.post_token_handler.reset_mock() - def patch_homeserver(self, hs: HomeServer): + def patch_homeserver(self, hs: HomeServer) -> ContextManager[Mock]: """Patch the ``HomeServer`` HTTP client to handle requests through the ``FakeOidcServer``. This patch should be used whenever the HS is expected to perform request to the @@ -188,7 +188,7 @@ def generate_logout_token(self, grant: FakeAuthorizationGrant) -> str: return self._sign(logout_token) - def id_token_override(self, overrides: dict): + def id_token_override(self, overrides: dict) -> ContextManager[dict]: """Temporarily patch the ID token generated by the token endpoint.""" return patch.object(self, "_id_token_overrides", overrides) @@ -247,7 +247,7 @@ def buggy_endpoint( metadata: bool = False, token: bool = False, userinfo: bool = False, - ): + ) -> ContextManager[Dict[str, Mock]]: """A context which makes a set of endpoints return a 500 error. Args: diff --git a/tests/test_visibility.py b/tests/test_visibility.py index d0b9ad54540d..875e37988f5d 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -258,7 +258,7 @@ def _inject_outlier(self) -> EventBase: class FilterEventsForClientTestCase(unittest.FederatingHomeserverTestCase): - def test_out_of_band_invite_rejection(self): + def test_out_of_band_invite_rejection(self) -> None: # this is where we have received an invite event over federation, and then # rejected it. 
invite_pdu = { diff --git a/tests/unittest.py b/tests/unittest.py index fa92dd94eb6a..68e59a88dc0f 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -315,7 +315,7 @@ def setUp(self) -> None: # This has to be a function and not just a Mock, because # `self.helper.auth_user_id` is temporarily reassigned in some tests - async def get_requester(*args, **kwargs) -> Requester: + async def get_requester(*args: Any, **kwargs: Any) -> Requester: assert self.helper.auth_user_id is not None return create_requester( user_id=UserID.from_string(self.helper.auth_user_id), From 7081bb56e2b4201ee730310a88180b6f76af1337 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 9 Feb 2023 00:23:35 +0000 Subject: [PATCH 157/344] Proper types for `tests.module_api` (#15031) * -> None for test methods * A first batch of type fixes * Introduce common parent test case * Fixup that big test method * tests.module_api passes mypy * Changelog --- changelog.d/15031.misc | 1 + mypy.ini | 1 - tests/events/test_presence_router.py | 10 ++- tests/module_api/test_api.py | 122 ++++++++++++++++----------- 4 files changed, 80 insertions(+), 54 deletions(-) create mode 100644 changelog.d/15031.misc diff --git a/changelog.d/15031.misc b/changelog.d/15031.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15031.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 0e5c6ccf6121..3f144e61fbba 100644 --- a/mypy.ini +++ b/mypy.ini @@ -32,7 +32,6 @@ exclude = (?x) |synapse/storage/databases/main/cache.py |synapse/storage/schema/ - |tests/module_api/test_api.py |tests/server.py )$ diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index a9893def7422..741bb6464a81 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -31,7 +31,11 @@ from tests.handlers.test_sync import generate_sync_config from tests.test_utils import simple_async_mock -from tests.unittest import FederatingHomeserverTestCase, override_config +from tests.unittest import ( + FederatingHomeserverTestCase, + HomeserverTestCase, + override_config, +) @attr.s @@ -470,7 +474,7 @@ def send_local_online_presence_to_with_module_test_body(self) -> None: def send_presence_update( - testcase: FederatingHomeserverTestCase, + testcase: HomeserverTestCase, user_id: str, access_token: str, presence_state: str, @@ -491,7 +495,7 @@ def send_presence_update( def sync_presence( - testcase: FederatingHomeserverTestCase, + testcase: HomeserverTestCase, user_id: str, since_token: Optional[StreamToken] = None, ) -> Tuple[List[UserPresenceState], StreamToken]: diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 8f88c0117d78..cc173ebda62f 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -11,9 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Any, Dict from unittest.mock import Mock from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import EduTypes, EventTypes from synapse.api.errors import NotFoundError @@ -21,9 +23,12 @@ from synapse.federation.units import Transaction from synapse.handlers.presence import UserPresenceState from synapse.handlers.push_rules import InvalidRuleException +from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login, notifications, presence, profile, room -from synapse.types import create_requester +from synapse.server import HomeServer +from synapse.types import JsonDict, create_requester +from synapse.util import Clock from tests.events.test_presence_router import send_presence_update, sync_presence from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -32,7 +37,19 @@ from tests.unittest import HomeserverTestCase, override_config -class ModuleApiTestCase(HomeserverTestCase): +class BaseModuleApiTestCase(HomeserverTestCase): + """Common properties of the two test case classes.""" + + module_api: ModuleApi + + # These are all written by _test_sending_local_online_presence_to_local_user. + presence_receiver_id: str + presence_receiver_tok: str + presence_sender_id: str + presence_sender_tok: str + + +class ModuleApiTestCase(BaseModuleApiTestCase): servlets = [ admin.register_servlets, login.register_servlets, @@ -42,14 +59,14 @@ class ModuleApiTestCase(HomeserverTestCase): notifications.register_servlets, ] - def prepare(self, reactor, clock, homeserver): - self.store = homeserver.get_datastores().main - self.module_api = homeserver.get_module_api() - self.event_creation_handler = homeserver.get_event_creation_handler() - self.sync_handler = homeserver.get_sync_handler() - self.auth_handler = homeserver.get_auth_handler() + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.module_api = hs.get_module_api() + self.event_creation_handler = hs.get_event_creation_handler() + self.sync_handler = hs.get_sync_handler() + self.auth_handler = hs.get_auth_handler() - def make_homeserver(self, reactor, clock): + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
fed_transport_client = Mock(spec=["send_transaction"]) fed_transport_client.send_transaction = simple_async_mock({}) @@ -58,7 +75,7 @@ def make_homeserver(self, reactor, clock): federation_transport_client=fed_transport_client, ) - def test_can_register_user(self): + def test_can_register_user(self) -> None: """Tests that an external module can register a user""" # Register a new user user_id, access_token = self.get_success( @@ -88,16 +105,17 @@ def test_can_register_user(self): displayname = self.get_success(self.store.get_profile_displayname("bob")) self.assertEqual(displayname, "Bobberino") - def test_can_register_admin_user(self): + def test_can_register_admin_user(self) -> None: user_id = self.register_user( "bob_module_admin", "1234", displayname="Bobberino Admin", admin=True ) found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id)) + assert found_user is not None self.assertEqual(found_user.user_id.to_string(), user_id) self.assertIdentical(found_user.is_admin, True) - def test_can_set_admin(self): + def test_can_set_admin(self) -> None: user_id = self.register_user( "alice_wants_admin", "1234", @@ -107,16 +125,17 @@ def test_can_set_admin(self): self.get_success(self.module_api.set_user_admin(user_id, True)) found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id)) + assert found_user is not None self.assertEqual(found_user.user_id.to_string(), user_id) self.assertIdentical(found_user.is_admin, True) - def test_can_set_displayname(self): + def test_can_set_displayname(self) -> None: localpart = "alice_wants_a_new_displayname" user_id = self.register_user( localpart, "1234", displayname="Alice", admin=False ) found_userinfo = self.get_success(self.module_api.get_userinfo_by_id(user_id)) - + assert found_userinfo is not None self.get_success( self.module_api.set_displayname( found_userinfo.user_id, "Bob", deactivation=False @@ -128,17 +147,18 @@ def test_can_set_displayname(self): self.assertEqual(found_profile.display_name, "Bob") - def test_get_userinfo_by_id(self): + def test_get_userinfo_by_id(self) -> None: user_id = self.register_user("alice", "1234") found_user = self.get_success(self.module_api.get_userinfo_by_id(user_id)) + assert found_user is not None self.assertEqual(found_user.user_id.to_string(), user_id) self.assertIdentical(found_user.is_admin, False) - def test_get_userinfo_by_id__no_user_found(self): + def test_get_userinfo_by_id__no_user_found(self) -> None: found_user = self.get_success(self.module_api.get_userinfo_by_id("@alice:test")) self.assertIsNone(found_user) - def test_get_user_ip_and_agents(self): + def test_get_user_ip_and_agents(self) -> None: user_id = self.register_user("test_get_user_ip_and_agents_user", "1234") # Initially, we should have no ip/agent for our user. @@ -185,7 +205,7 @@ def test_get_user_ip_and_agents(self): # we should only find the second ip, agent. 
info = self.get_success( self.module_api.get_user_ip_and_agents( - user_id, (last_seen_1 + last_seen_2) / 2 + user_id, (last_seen_1 + last_seen_2) // 2 ) ) self.assertEqual(len(info), 1) @@ -200,7 +220,7 @@ def test_get_user_ip_and_agents(self): ) self.assertEqual(info, []) - def test_get_user_ip_and_agents__no_user_found(self): + def test_get_user_ip_and_agents__no_user_found(self) -> None: info = self.get_success( self.module_api.get_user_ip_and_agents( "@test_get_user_ip_and_agents_user_nonexistent:example.com" @@ -208,10 +228,10 @@ def test_get_user_ip_and_agents__no_user_found(self): ) self.assertEqual(info, []) - def test_sending_events_into_room(self): + def test_sending_events_into_room(self) -> None: """Tests that a module can send events into a room""" # Mock out create_and_send_nonmember_event to check whether events are being sent - self.event_creation_handler.create_and_send_nonmember_event = Mock( + self.event_creation_handler.create_and_send_nonmember_event = Mock( # type: ignore[assignment] spec=[], side_effect=self.event_creation_handler.create_and_send_nonmember_event, ) @@ -222,7 +242,7 @@ def test_sending_events_into_room(self): room_id = self.helper.create_room_as(user_id, tok=tok) # Create and send a non-state event - content = {"body": "I am a puppet", "msgtype": "m.text"} + content: JsonDict = {"body": "I am a puppet", "msgtype": "m.text"} event_dict = { "room_id": room_id, "type": "m.room.message", @@ -265,7 +285,7 @@ def test_sending_events_into_room(self): "sender": user_id, "state_key": "", } - event: EventBase = self.get_success( + event = self.get_success( self.module_api.create_and_send_event_into_room(event_dict) ) self.assertEqual(event.sender, user_id) @@ -303,7 +323,7 @@ def test_sending_events_into_room(self): self.module_api.create_and_send_event_into_room(event_dict), Exception ) - def test_public_rooms(self): + def test_public_rooms(self) -> None: """Tests that a room can be added and removed from the public rooms list, as well as have its public rooms directory state queried. """ @@ -350,13 +370,13 @@ def test_public_rooms(self): ) self.assertFalse(is_in_public_rooms) - def test_send_local_online_presence_to(self): + def test_send_local_online_presence_to(self) -> None: # Test sending local online presence to users from the main process _test_sending_local_online_presence_to_local_user(self, test_with_workers=False) # Enable federation sending on the main process. @override_config({"federation_sender_instances": None}) - def test_send_local_online_presence_to_federation(self): + def test_send_local_online_presence_to_federation(self) -> None: """Tests that send_local_presence_to_users sends local online presence to remote users.""" # Create a user who will send presence updates self.presence_sender_id = self.register_user("presence_sender1", "monkey") @@ -431,7 +451,7 @@ def test_send_local_online_presence_to_federation(self): self.assertTrue(found_update) - def test_update_membership(self): + def test_update_membership(self) -> None: """Tests that the module API can update the membership of a user in a room.""" peter = self.register_user("peter", "hackme") lesley = self.register_user("lesley", "hackme") @@ -554,7 +574,7 @@ def test_update_membership(self): self.assertEqual(res["displayname"], "simone") self.assertIsNone(res["avatar_url"]) - def test_update_room_membership_remote_join(self): + def test_update_room_membership_remote_join(self) -> None: """Test that the module API can join a remote room.""" # Necessary to fake a remote join. 
fake_stream_id = 1 @@ -582,7 +602,7 @@ def test_update_room_membership_remote_join(self): # Check that a remote join was attempted. self.assertEqual(mocked_remote_join.call_count, 1) - def test_get_room_state(self): + def test_get_room_state(self) -> None: """Tests that a module can retrieve the state of a room through the module API.""" user_id = self.register_user("peter", "hackme") tok = self.login("peter", "hackme") @@ -677,7 +697,7 @@ def test_check_push_rules_actions(self) -> None: self.module_api.check_push_rule_actions(["foo"]) with self.assertRaises(InvalidRuleException): - self.module_api.check_push_rule_actions({"foo": "bar"}) + self.module_api.check_push_rule_actions([{"foo": "bar"}]) self.module_api.check_push_rule_actions(["notify"]) @@ -756,7 +776,7 @@ def test_create_room(self) -> None: self.assertIsNone(room_alias) -class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): +class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCase): """For testing ModuleApi functionality in a multi-worker setup""" servlets = [ @@ -766,7 +786,7 @@ class ModuleApiWorkerTestCase(BaseMultiWorkerStreamTestCase): presence.register_servlets, ] - def default_config(self): + def default_config(self) -> Dict[str, Any]: conf = super().default_config() conf["stream_writers"] = {"presence": ["presence_writer"]} conf["instance_map"] = { @@ -774,18 +794,18 @@ def default_config(self): } return conf - def prepare(self, reactor, clock, homeserver): - self.module_api = homeserver.get_module_api() - self.sync_handler = homeserver.get_sync_handler() + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.module_api = hs.get_module_api() + self.sync_handler = hs.get_sync_handler() - def test_send_local_online_presence_to_workers(self): + def test_send_local_online_presence_to_workers(self) -> None: # Test sending local online presence to users from a worker process _test_sending_local_online_presence_to_local_user(self, test_with_workers=True) def _test_sending_local_online_presence_to_local_user( - test_case: HomeserverTestCase, test_with_workers: bool = False -): + test_case: BaseModuleApiTestCase, test_with_workers: bool = False +) -> None: """Tests that send_local_presence_to_users sends local online presence to local users. This simultaneously tests two different usecases: @@ -852,6 +872,7 @@ def _test_sending_local_online_presence_to_local_user( # Replicate the current sync presence token from the main process to the worker process. # We need to do this so that the worker process knows the current presence stream ID to # insert into the database when we call ModuleApi.send_local_online_presence_to. + assert isinstance(test_case, BaseMultiWorkerStreamTestCase) test_case.replicate() # Syncing again should result in no presence updates @@ -868,6 +889,7 @@ def _test_sending_local_online_presence_to_local_user( # Determine on which process (main or worker) to call ModuleApi.send_local_online_presence_to on if test_with_workers: + assert isinstance(test_case, BaseMultiWorkerStreamTestCase) module_api_to_use = worker_hs.get_module_api() else: module_api_to_use = test_case.module_api @@ -875,12 +897,11 @@ def _test_sending_local_online_presence_to_local_user( # Trigger sending local online presence. We expect this information # to be saved to the database where all processes can access it. # Note that we're syncing via the master. 
- d = module_api_to_use.send_local_online_presence_to( - [ - test_case.presence_receiver_id, - ] + d = defer.ensureDeferred( + module_api_to_use.send_local_online_presence_to( + [test_case.presence_receiver_id], + ) ) - d = defer.ensureDeferred(d) if test_with_workers: # In order for the required presence_set_state replication request to occur between the @@ -897,7 +918,7 @@ def _test_sending_local_online_presence_to_local_user( ) test_case.assertEqual(len(presence_updates), 1) - presence_update: UserPresenceState = presence_updates[0] + presence_update = presence_updates[0] test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) test_case.assertEqual(presence_update.state, "online") @@ -908,7 +929,7 @@ def _test_sending_local_online_presence_to_local_user( ) test_case.assertEqual(len(presence_updates), 1) - presence_update: UserPresenceState = presence_updates[0] + presence_update = presence_updates[0] test_case.assertEqual(presence_update.user_id, test_case.presence_sender_id) test_case.assertEqual(presence_update.state, "online") @@ -936,12 +957,13 @@ def _test_sending_local_online_presence_to_local_user( test_case.assertEqual(len(presence_updates), 1) # Now trigger sending local online presence. - d = module_api_to_use.send_local_online_presence_to( - [ - test_case.presence_receiver_id, - ] + d = defer.ensureDeferred( + module_api_to_use.send_local_online_presence_to( + [ + test_case.presence_receiver_id, + ] + ) ) - d = defer.ensureDeferred(d) if test_with_workers: # In order for the required presence_set_state replication request to occur between the From 733531ee3e695da92f10e01b24f62ee35e09e4cd Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 9 Feb 2023 09:49:04 -0500 Subject: [PATCH 158/344] Add final type hint to synapse.server. (#15035) --- changelog.d/15035.misc | 1 + mypy.ini | 3 --- synapse/handlers/room.py | 2 +- synapse/server.py | 12 +++++------- synapse/storage/_base.py | 2 ++ synapse/storage/database.py | 1 + synapse/storage/databases/main/events.py | 2 +- 7 files changed, 11 insertions(+), 12 deletions(-) create mode 100644 changelog.d/15035.misc diff --git a/changelog.d/15035.misc b/changelog.d/15035.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15035.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 3f144e61fbba..57f27ba4f72f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -53,9 +53,6 @@ warn_unused_ignores = False [mypy-synapse.util.caches.treecache] disallow_untyped_defs = False -[mypy-synapse.server] -disallow_untyped_defs = False - [mypy-synapse.storage.database] disallow_untyped_defs = False diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 7ba7c4ff0714..0e759b8a5d7a 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1076,7 +1076,7 @@ async def _send_events_for_new_room( state_map: MutableStateMap[str] = {} # current_state_group of last event created. 
Used for computing event context of # events to be batched - current_state_group = None + current_state_group: Optional[int] = None def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: e = {"type": etype, "content": content} diff --git a/synapse/server.py b/synapse/server.py index 9d6d268f490c..efc6b5f895da 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -21,7 +21,7 @@ import abc import functools import logging -from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, TypeVar, cast +from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast from twisted.internet.interfaces import IOpenSSLContextFactory from twisted.internet.tcp import Port @@ -144,10 +144,10 @@ from synapse.handlers.saml import SamlHandler -T = TypeVar("T", bound=Callable[..., Any]) +T = TypeVar("T") -def cache_in_self(builder: T) -> T: +def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]: """Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and returning if so. If not, calls the given function and sets `self.foo` to it. @@ -166,7 +166,7 @@ def cache_in_self(builder: T) -> T: building = [False] @functools.wraps(builder) - def _get(self): + def _get(self: "HomeServer") -> T: try: return getattr(self, depname) except AttributeError: @@ -185,9 +185,7 @@ def _get(self): return dep - # We cast here as we need to tell mypy that `_get` has the same signature as - # `builder`. - return cast(T, _get) + return _get class HomeServer(metaclass=abc.ABCMeta): diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 41d9111019b2..481fec72fe1b 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -37,6 +37,8 @@ class SQLBaseStore(metaclass=ABCMeta): per data store (and not one per physical database). """ + db_pool: DatabasePool + def __init__( self, database: DatabasePool, diff --git a/synapse/storage/database.py b/synapse/storage/database.py index e20c5c5302fa..feaa6cdd073b 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -499,6 +499,7 @@ class DatabasePool: """ _TXN_ID = 0 + engine: BaseDatabaseEngine def __init__( self, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 1536937b67e8..cb66376fb4bf 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -306,7 +306,7 @@ async def _get_prevs_before_rejected(self, event_ids: Iterable[str]) -> Set[str] # The set of event_ids to return. This includes all soft-failed events # and their prev events. - existing_prevs = set() + existing_prevs: Set[str] = set() def _get_prevs_before_rejected_txn( txn: LoggingTransaction, batch: Collection[str] From cd2484dc2e943e40242337dae61f5170638116a2 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 9 Feb 2023 15:28:26 +0000 Subject: [PATCH 159/344] Bump schema version (#15036) * Bump schema version This should have been included in f10caa73eee0caa91cf373966104d1ededae2aee (and #14979). * Changelog --- changelog.d/15036.misc | 1 + synapse/storage/schema/__init__.py | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15036.misc diff --git a/changelog.d/15036.misc b/changelog.d/15036.misc new file mode 100644 index 000000000000..b0adc9c9d1d6 --- /dev/null +++ b/changelog.d/15036.misc @@ -0,0 +1 @@ +Prepare for future database schema changes. 
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 19dbf2da7fee..d3103a6c7a05 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -SCHEMA_VERSION = 73 # remember to update the list below when updating +SCHEMA_VERSION = 74 # remember to update the list below when updating """Represents the expectations made by the codebase about the database schema This should be incremented whenever the codebase changes its requirements on the @@ -78,7 +78,7 @@ - Unused column application_services_state.last_txn is dropped - Cache invalidation stream id sequence now begins at 2 to match code expectation. -Changes in SCHEMA_VERSION = 73; +Changes in SCHEMA_VERSION = 73: - thread_id column is added to event_push_actions, event_push_actions_staging event_push_summary, receipts_linearized, and receipts_graph. - Add table `event_failed_pull_attempts` to keep track when we fail to pull @@ -86,6 +86,11 @@ - Add indexes to various tables (`event_failed_pull_attempts`, `insertion_events`, `batch_events`) to make it easy to delete all associated rows when purging a room. - `inserted_ts` column is added to `event_push_actions_staging` table. + +Changes in SCHEMA_VERSION = 74: + - A query on `event_stream_ordering` column has now been disambiguated (i.e. the + codebase can handle the `current_state_events`, `local_current_memberships` and + `room_memberships` tables having an `event_stream_ordering` column). """ From 8a6e0434889ea94893119775b6f56904cbc575c2 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 9 Feb 2023 10:56:02 -0500 Subject: [PATCH 160/344] Avoid mutating cached room aliases. (#15038) This might cause incorrect data in other callers which are not expecting the canonical alias to be added into the response. --- changelog.d/15038.bugfix | 1 + synapse/handlers/directory.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15038.bugfix diff --git a/changelog.d/15038.bugfix b/changelog.d/15038.bugfix new file mode 100644 index 000000000000..4695a097561c --- /dev/null +++ b/changelog.d/15038.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where the room aliases returned could be corrupted. diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 2ea52257cb9e..d31b0fbb1745 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -485,7 +485,8 @@ async def edit_published_room_list( ) ) if canonical_alias: - room_aliases.append(canonical_alias) + # Ensure we do not mutate room_aliases. + room_aliases = room_aliases + [canonical_alias] if not self.config.roomdirectory.is_publishing_room_allowed( user_id, room_id, room_aliases From d22c1c862c8259465a8e95c41eb1f00d0367a640 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 9 Feb 2023 13:04:24 -0500 Subject: [PATCH 161/344] Respond correctly to unknown methods on known endpoints (#14605) Respond with a 405 error if a request is received on a known endpoint, but to an unknown method, per MSC3743. 
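(Editor's illustration, not part of the patch: the behaviour described above — method-aware routing in which a known path with an unsupported method yields a 405 while an unrecognised path yields a 404, each with a spec-style `errcode` body — can be sketched roughly as below. The route table, endpoint, and handler names are invented for the example; this is not Synapse's actual `JsonResource` code, and the use of `M_UNRECOGNIZED` here is an assumption about the error code involved.)

```python
import re

# Toy route table: path regex -> {HTTP method -> handler}.
ROUTES = {
    re.compile(r"^/rooms/(?P<room_id>[^/]+)/tags$"): {
        "GET": lambda room_id: (200, {"tags": {}}),
    },
}

def handle(method: str, path: str):
    for pattern, methods in ROUTES.items():
        match = pattern.match(path)
        if match:
            handler = methods.get(method)
            if handler is None:
                # The path is known, but this method is not supported: 405.
                return 405, {"errcode": "M_UNRECOGNIZED", "error": "Unsupported method"}
            return handler(**match.groupdict())
    # No registered path matched at all: 404.
    return 404, {"errcode": "M_UNRECOGNIZED", "error": "Unrecognized request"}

print(handle("PUT", "/rooms/!abc:example.org/tags"))   # (405, {...})
print(handle("GET", "/unknown/endpoint"))              # (404, {...})
print(handle("GET", "/rooms/!abc:example.org/tags"))   # (200, {'tags': {}})
```
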
--- changelog.d/14605.bugfix | 1 + docs/admin_api/media_admin_api.md | 10 ++++++- docs/upgrade.md | 10 +++++++ synapse/http/server.py | 40 ++++++++++---------------- synapse/rest/admin/media.py | 18 ++++++++---- synapse/rest/client/room_keys.py | 48 ++++++++++++++++++++----------- synapse/rest/client/tags.py | 4 ++- tests/rest/admin/test_media.py | 9 ++++-- 8 files changed, 89 insertions(+), 51 deletions(-) create mode 100644 changelog.d/14605.bugfix diff --git a/changelog.d/14605.bugfix b/changelog.d/14605.bugfix new file mode 100644 index 000000000000..cb95a87d9219 --- /dev/null +++ b/changelog.d/14605.bugfix @@ -0,0 +1 @@ +Return spec-compliant JSON errors when unknown endpoints are requested. diff --git a/docs/admin_api/media_admin_api.md b/docs/admin_api/media_admin_api.md index 7f8c8e22c1a0..30833f31091b 100644 --- a/docs/admin_api/media_admin_api.md +++ b/docs/admin_api/media_admin_api.md @@ -235,6 +235,14 @@ The following fields are returned in the JSON response body: Request: +``` +POST /_synapse/admin/v1/media/delete?before_ts= + +{} +``` + +*Deprecated in Synapse v1.78.0:* This API is available at the deprecated endpoint: + ``` POST /_synapse/admin/v1/media//delete?before_ts= @@ -243,7 +251,7 @@ POST /_synapse/admin/v1/media//delete?before_ts= URL Parameters -* `server_name`: string - The name of your local server (e.g `matrix.org`). +* `server_name`: string - The name of your local server (e.g `matrix.org`). *Deprecated in Synapse v1.78.0.* * `before_ts`: string representing a positive integer - Unix timestamp in milliseconds. Files that were last used before this timestamp will be deleted. It is the timestamp of last access, not the timestamp when the file was created. diff --git a/docs/upgrade.md b/docs/upgrade.md index bc143444bed6..15167b8c5825 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,15 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.78.0 + +## Deprecate the `/_synapse/admin/v1/media//delete` admin API + +Synapse 1.78.0 replaces the `/_synapse/admin/v1/media//delete` +admin API with an identical endpoint at `/_synapse/admin/v1/media/delete`. Please +update your tooling to use the new endpoint. The deprecated version will be removed +in a future release. + # Upgrading to v1.76.0 ## Faster joins are enabled by default @@ -137,6 +146,7 @@ and then do `pip install matrix-synapse[user-search]` for a PyPI install. Docker images and Debian packages need nothing specific as they already include or specify ICU as an explicit dependency. + # Upgrading to v1.73.0 ## Legacy Prometheus metric names have now been removed diff --git a/synapse/http/server.py b/synapse/http/server.py index 2563858f3cdf..9314454af106 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -30,7 +30,6 @@ Iterable, Iterator, List, - NoReturn, Optional, Pattern, Tuple, @@ -340,7 +339,8 @@ async def _async_render(self, request: SynapseRequest) -> Optional[Tuple[int, An return callback_return - return _unrecognised_request_handler(request) + # A request with an unknown method (for a known endpoint) was received. 
+ raise UnrecognizedRequestError(code=405) @abc.abstractmethod def _send_response( @@ -396,7 +396,6 @@ def _send_error_response( @attr.s(slots=True, frozen=True, auto_attribs=True) class _PathEntry: - pattern: Pattern callback: ServletCallback servlet_classname: str @@ -425,13 +424,14 @@ def __init__( ): super().__init__(canonical_json, extract_context) self.clock = hs.get_clock() - self.path_regexs: Dict[bytes, List[_PathEntry]] = {} + # Map of path regex -> method -> callback. + self._routes: Dict[Pattern[str], Dict[bytes, _PathEntry]] = {} self.hs = hs def register_paths( self, method: str, - path_patterns: Iterable[Pattern], + path_patterns: Iterable[Pattern[str]], callback: ServletCallback, servlet_classname: str, ) -> None: @@ -455,8 +455,8 @@ def register_paths( for path_pattern in path_patterns: logger.debug("Registering for %s %s", method, path_pattern.pattern) - self.path_regexs.setdefault(method_bytes, []).append( - _PathEntry(path_pattern, callback, servlet_classname) + self._routes.setdefault(path_pattern, {})[method_bytes] = _PathEntry( + callback, servlet_classname ) def _get_handler_for_request( @@ -478,14 +478,17 @@ def _get_handler_for_request( # Loop through all the registered callbacks to check if the method # and path regex match - for path_entry in self.path_regexs.get(request_method, []): - m = path_entry.pattern.match(request_path) + for path_pattern, methods in self._routes.items(): + m = path_pattern.match(request_path) if m: - # We found a match! + # We found a matching path! + path_entry = methods.get(request_method) + if not path_entry: + raise UnrecognizedRequestError(code=405) return path_entry.callback, path_entry.servlet_classname, m.groupdict() - # Huh. No one wanted to handle that? Fiiiiiine. Send 400. - return _unrecognised_request_handler, "unrecognised_request_handler", {} + # Huh. No one wanted to handle that? Fiiiiiine. + raise UnrecognizedRequestError(code=404) async def _async_render(self, request: SynapseRequest) -> Tuple[int, Any]: callback, servlet_classname, group_dict = self._get_handler_for_request(request) @@ -567,19 +570,6 @@ def render_GET(self, request: Request) -> bytes: return super().render_GET(request) -def _unrecognised_request_handler(request: Request) -> NoReturn: - """Request handler for unrecognised requests - - This is a request handler suitable for return from - _get_handler_for_request. It actually just raises an - UnrecognizedRequestError. - - Args: - request: Unused, but passed in to match the signature of ServletCallback. - """ - raise UnrecognizedRequestError(code=404) - - class UnrecognizedRequestResource(resource.Resource): """ Similar to twisted.web.resource.NoResource, but returns a JSON 404 with an diff --git a/synapse/rest/admin/media.py b/synapse/rest/admin/media.py index 0d072c42a7aa..c134ccfb3d3c 100644 --- a/synapse/rest/admin/media.py +++ b/synapse/rest/admin/media.py @@ -15,7 +15,7 @@ import logging from http import HTTPStatus -from typing import TYPE_CHECKING, Tuple +from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.constants import Direction from synapse.api.errors import Codes, NotFoundError, SynapseError @@ -285,7 +285,12 @@ class DeleteMediaByDateSize(RestServlet): timestamp and size. """ - PATTERNS = admin_patterns("/media/(?P[^/]*)/delete$") + PATTERNS = [ + *admin_patterns("/media/delete$"), + # This URL kept around for legacy reasons, it is undesirable since it + # overlaps with the DeleteMediaByID servlet. 
+ *admin_patterns("/media/(?P[^/]*)/delete$"), + ] def __init__(self, hs: "HomeServer"): self.store = hs.get_datastores().main @@ -294,7 +299,7 @@ def __init__(self, hs: "HomeServer"): self.media_repository = hs.get_media_repository() async def on_POST( - self, request: SynapseRequest, server_name: str + self, request: SynapseRequest, server_name: Optional[str] = None ) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) @@ -322,7 +327,8 @@ async def on_POST( errcode=Codes.INVALID_PARAM, ) - if self.server_name != server_name: + # This check is useless, we keep it for the legacy endpoint only. + if server_name is not None and self.server_name != server_name: raise SynapseError(HTTPStatus.BAD_REQUEST, "Can only delete local media") logging.info( @@ -489,6 +495,8 @@ def register_servlets_for_media_repo(hs: "HomeServer", http_server: HttpServer) ProtectMediaByID(hs).register(http_server) UnprotectMediaByID(hs).register(http_server) ListMediaInRoom(hs).register(http_server) - DeleteMediaByID(hs).register(http_server) + # XXX DeleteMediaByDateSize must be registered before DeleteMediaByID as + # their URL routes overlap. DeleteMediaByDateSize(hs).register(http_server) + DeleteMediaByID(hs).register(http_server) UserMediaRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/room_keys.py b/synapse/rest/client/room_keys.py index f7081f638e51..4e7ffdb5551e 100644 --- a/synapse/rest/client/room_keys.py +++ b/synapse/rest/client/room_keys.py @@ -259,6 +259,32 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() + async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + """ + Retrieve the version information about the most current backup version (if any) + + It takes out an exclusive lock on this user's room_key backups, to ensure + clients only upload to the current backup. + + Returns 404 if the given version does not exist. + + GET /room_keys/version HTTP/1.1 + { + "version": "12345", + "algorithm": "m.megolm_backup.v1", + "auth_data": "dGhpcyBzaG91bGQgYWN0dWFsbHkgYmUgZW5jcnlwdGVkIGpzb24K" + } + """ + requester = await self.auth.get_user_by_req(request, allow_guest=False) + user_id = requester.user.to_string() + + try: + info = await self.e2e_room_keys_handler.get_version_info(user_id) + except SynapseError as e: + if e.code == 404: + raise SynapseError(404, "No backup found", Codes.NOT_FOUND) + return 200, info + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: """ Create a new backup version for this user's room_keys with the given @@ -301,7 +327,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: class RoomKeysVersionServlet(RestServlet): - PATTERNS = client_patterns("/room_keys/version(/(?P[^/]+))?$") + PATTERNS = client_patterns("/room_keys/version/(?P[^/]+)$") def __init__(self, hs: "HomeServer"): super().__init__() @@ -309,12 +335,11 @@ def __init__(self, hs: "HomeServer"): self.e2e_room_keys_handler = hs.get_e2e_room_keys_handler() async def on_GET( - self, request: SynapseRequest, version: Optional[str] + self, request: SynapseRequest, version: str ) -> Tuple[int, JsonDict]: """ Retrieve the version information about a given version of the user's - room_keys backup. If the version part is missing, returns info about the - most current backup version (if any) + room_keys backup. It takes out an exclusive lock on this user's room_key backups, to ensure clients only upload to the current backup. 
@@ -339,20 +364,16 @@ async def on_GET( return 200, info async def on_DELETE( - self, request: SynapseRequest, version: Optional[str] + self, request: SynapseRequest, version: str ) -> Tuple[int, JsonDict]: """ Delete the information about a given version of the user's - room_keys backup. If the version part is missing, deletes the most - current backup version (if any). Doesn't delete the actual room data. + room_keys backup. Doesn't delete the actual room data. DELETE /room_keys/version/12345 HTTP/1.1 HTTP/1.1 200 OK {} """ - if version is None: - raise SynapseError(400, "No version specified to delete", Codes.NOT_FOUND) - requester = await self.auth.get_user_by_req(request, allow_guest=False) user_id = requester.user.to_string() @@ -360,7 +381,7 @@ async def on_DELETE( return 200, {} async def on_PUT( - self, request: SynapseRequest, version: Optional[str] + self, request: SynapseRequest, version: str ) -> Tuple[int, JsonDict]: """ Update the information about a given version of the user's room_keys backup. @@ -386,11 +407,6 @@ async def on_PUT( user_id = requester.user.to_string() info = parse_json_object_from_request(request) - if version is None: - raise SynapseError( - 400, "No version specified to update", Codes.MISSING_PARAM - ) - await self.e2e_room_keys_handler.update_version(user_id, version, info) return 200, {} diff --git a/synapse/rest/client/tags.py b/synapse/rest/client/tags.py index ca638755c7ee..dde08417a407 100644 --- a/synapse/rest/client/tags.py +++ b/synapse/rest/client/tags.py @@ -34,7 +34,9 @@ class TagListServlet(RestServlet): GET /user/{user_id}/rooms/{room_id}/tags HTTP/1.1 """ - PATTERNS = client_patterns("/user/(?P[^/]*)/rooms/(?P[^/]*)/tags") + PATTERNS = client_patterns( + "/user/(?P[^/]*)/rooms/(?P[^/]*)/tags$" + ) def __init__(self, hs: "HomeServer"): super().__init__() diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index aadb31ca838e..db77a45ae39f 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -213,7 +213,8 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.admin_user_tok = self.login("admin", "pass") self.filepaths = MediaFilePaths(hs.config.media.media_store_path) - self.url = "/_synapse/admin/v1/media/%s/delete" % self.server_name + self.url = "/_synapse/admin/v1/media/delete" + self.legacy_url = "/_synapse/admin/v1/media/%s/delete" % self.server_name # Move clock up to somewhat realistic time self.reactor.advance(1000000000) @@ -332,11 +333,13 @@ def test_invalid_parameter(self) -> None: channel.json_body["error"], ) - def test_delete_media_never_accessed(self) -> None: + @parameterized.expand([(True,), (False,)]) + def test_delete_media_never_accessed(self, use_legacy_url: bool) -> None: """ Tests that media deleted if it is older than `before_ts` and never accessed `last_access_ts` is `NULL` and `created_ts` < `before_ts` """ + url = self.legacy_url if use_legacy_url else self.url # upload and do not access server_and_media_id = self._create_media() @@ -351,7 +354,7 @@ def test_delete_media_never_accessed(self) -> None: now_ms = self.clock.time_msec() channel = self.make_request( "POST", - self.url + "?before_ts=" + str(now_ms), + url + "?before_ts=" + str(now_ms), access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) From 218a383c43f23c05ddcd4c3f1ea315500136eff8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 9 Feb 2023 18:18:42 +0000 Subject: 
[PATCH 162/344] Bump ruff version from 0.0.230 to 0.0.237. (#15033) --- changelog.d/15033.misc | 1 + poetry.lock | 36 ++++++++++++++++++------------------ pyproject.toml | 2 +- 3 files changed, 20 insertions(+), 19 deletions(-) create mode 100644 changelog.d/15033.misc diff --git a/changelog.d/15033.misc b/changelog.d/15033.misc new file mode 100644 index 000000000000..83dc3a75b6b6 --- /dev/null +++ b/changelog.d/15033.misc @@ -0,0 +1 @@ +Bump ruff version from 0.0.230 to 0.0.237. diff --git a/poetry.lock b/poetry.lock index ba7b3a5d5dec..7274f8a53754 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1967,28 +1967,28 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] [[package]] name = "ruff" -version = "0.0.230" +version = "0.0.237" description = "An extremely fast Python linter, written in Rust." category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.230-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:fcc31d02cebda0a85e2e13a44642aea7f84362cb4f589e2f6b864e3928e4a7db"}, - {file = "ruff-0.0.230-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:45a7f2c7155d520b8ca255a01235763d5c25fd5e7af055e50a78c6d91ece0ced"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4eca8b185ab56cac67acc23287c3c8c62a0c0ffadc0787a3bef3a6e77eaed82f"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec2bcdb5040efd8082a3a98369eec4bdc5fd05f53cc6714cb2b725d557d4abe8"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:26571aee2b93b60e47e44478f72a9787b387f752e85b85f176739bd91b27cfd1"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:4b69c9883c3e264f8bb2d52bdabb88b8d9672750ea05f33e0ff52532824bd5c5"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b3dc88b83f200378a9b9c91036989f0285a10759514c42235ce02e5824ac8d0"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:767716f008dd3a40ec2318396f648fda437c6968087a4526cde5879e382cf477"}, - {file = "ruff-0.0.230-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac27a0f9b96d9923cef7d911790a21a19b51aec0f08375ccc47ad735b1054d78"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:729dfc7b7ad4f7d8761dc60c58f15372d6f5c2dd9b6c5952524f2bc3aec7de6a"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ad086cf2e5fef274687121f673f0f9b60c8981ec07c2bb0448c459cbaef81bcb"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_i686.whl", hash = "sha256:4feaed0978c24687133cd11c7380de20aa841f893e24430c735cc6c3faba4837"}, - {file = "ruff-0.0.230-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1d1046d0d43a0f24b2e9e61d76bb201b486ad02e9787d3432af43bd7d16f2c2e"}, - {file = "ruff-0.0.230-py3-none-win32.whl", hash = "sha256:4d627911c9ba57bcd2f2776f1c09a10d334db163cb5be8c892e7ec7b59ccf58c"}, - {file = "ruff-0.0.230-py3-none-win_amd64.whl", hash = "sha256:27fd4891a1d0642f5b2038ebf86f8169bc3d466964bdfaa0ce2a65149bc7cced"}, - {file = "ruff-0.0.230.tar.gz", hash = "sha256:a049f93af1057ac450e8c09559d44e371eda1c151b1b863c0013a1066fefddb0"}, + {file = "ruff-0.0.237-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:2ea04d826ffca58a7ae926115a801960c757d53c9027f2ca9acbe84c9f2b2f04"}, + {file = "ruff-0.0.237-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = 
"sha256:8ed113937fab9f73f8c1a6c0350bb4fe03e951370139c6e0adb81f48a8dcf4c6"}, + {file = "ruff-0.0.237-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcb71a3efb5fe886eb48d739cfae5df4a15617e7b5a7668aa45ebf74c0d3fa"}, + {file = "ruff-0.0.237-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:80ce10718abbf502818c0d650ebab99fdcef5e937a1ded3884493ddff804373c"}, + {file = "ruff-0.0.237-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cc6cb7c1efcc260df5a939435649610a28f9f438b8b313384c8985ac6574f9f"}, + {file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7eef0c7a1e45a4e30328ae101613575944cbf47a3a11494bf9827722da6c66b3"}, + {file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d122433a21ce4a21fbba34b73fc3add0ccddd1643b3ff5abb8d2767952f872e"}, + {file = "ruff-0.0.237-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b76311335adda4de3c1d471e64e89a49abfeebf02647e3db064e7740e7f36ed6"}, + {file = "ruff-0.0.237-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c5977b643aaf2b6f84641265f835b6c7f67fcca38dbae08c4f15602e084ca0"}, + {file = "ruff-0.0.237-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ed86d0d4d742360a262d52191581f12b669a68e59ae3b52e80d7483b3d7b3"}, + {file = "ruff-0.0.237-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fedfb60f986c26cdb1809db02866e68508db99910c587d2c4066a5c07aa85593"}, + {file = "ruff-0.0.237-py3-none-musllinux_1_2_i686.whl", hash = "sha256:bb96796be5919871fa9ae7e88968ba9e14306d9a3f217ca6c204f68a5abeccdd"}, + {file = "ruff-0.0.237-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ea239cfedf67b74ea4952e1074bb99a4281c2145441d70bc7e2f058d5c49f1c9"}, + {file = "ruff-0.0.237-py3-none-win32.whl", hash = "sha256:8d6a1d21ae15da2b1dcffeee2606e90de0e6717e72957da7d16ab6ae18dd0058"}, + {file = "ruff-0.0.237-py3-none-win_amd64.whl", hash = "sha256:525e5ec81cee29b993f77976026a6bf44528a14aa6edb1ef47bd8079147395ae"}, + {file = "ruff-0.0.237.tar.gz", hash = "sha256:630c575f543733adf6c19a11d9a02ca9ecc364bd7140af8a4c854d4728be6b56"}, ] [[package]] @@ -3025,4 +3025,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "2673ef0530a42dae1df998bacfcaf88a563529b39461003a980743a97f02996f" +content-hash = "16528ddab686d1bc3180ff37b09de35b904f68516cfdcc3942844163a126255e" diff --git a/pyproject.toml b/pyproject.toml index 1e59706104ad..2a21da6351e2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -311,7 +311,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.230" +ruff = "0.0.237" # Typechecking mypy = "*" From c1d2ce2901ab1c7cfaeebb4683af05a2ebf19fa6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 9 Feb 2023 19:57:01 +0000 Subject: [PATCH 163/344] Do not always start a db txn on Postgres (#14840) --- changelog.d/14840.misc | 1 + synapse/storage/prepare_database.py | 13 +++++++++---- 2 files changed, 10 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14840.misc diff --git a/changelog.d/14840.misc b/changelog.d/14840.misc new file mode 100644 index 000000000000..ff6084284a5e --- /dev/null +++ b/changelog.d/14840.misc @@ -0,0 +1 @@ +Prevent "WARNING: there is already a transaction in progress" lines appearing in PostgreSQL's logs on some occasions. 
\ No newline at end of file diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 3acdb39da75c..6c335a931501 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -23,7 +23,7 @@ from synapse.config.homeserver import HomeServerConfig from synapse.storage.database import LoggingDatabaseConnection -from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine +from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine from synapse.storage.schema import SCHEMA_COMPAT_VERSION, SCHEMA_VERSION from synapse.storage.types import Cursor @@ -108,9 +108,14 @@ def prepare_database( # so we start one before running anything. This ensures that any upgrades # are either applied completely, or not at all. # - # (psycopg2 automatically starts a transaction as soon as we run any statements - # at all, so this is redundant but harmless there.) - cur.execute("BEGIN TRANSACTION") + # psycopg2 does not automatically start transactions when in autocommit mode. + # While it is technically harmless to nest transactions in postgres, doing so + # results in a warning in Postgres' logs per query. And we'd rather like to + # avoid doing that. + if isinstance(database_engine, Sqlite3Engine) or ( + isinstance(database_engine, PostgresEngine) and db_conn.autocommit + ): + cur.execute("BEGIN TRANSACTION") logger.info("%r: Checking existing schema version", databases) version_info = _get_or_create_schema_state(cur, database_engine) From 03bccd542bcffe3ea12cd35108740a7d62dd38ab Mon Sep 17 00:00:00 2001 From: Shay Date: Thu, 9 Feb 2023 13:05:02 -0800 Subject: [PATCH 164/344] Add a class UnpersistedEventContext to allow for the batching up of storing state groups (#14675) * add class UnpersistedEventContext * modify create new client event to create unpersistedeventcontexts * persist event contexts after creation * fix tests to persist unpersisted event contexts * cleanup * misc lints + cleanup * changelog + fix comments * lints * fix batch insertion? 
* reduce redundant calculation * add unpersisted event classes * rework compute_event_context, split into function that returns unpersisted event context and then persists it * use calculate_context_info to create unpersisted event contexts * update typing * $%#^&* * black * fix comments and consolidate classes, use attr.s for class * requested changes * lint * requested changes * requested changes * refactor to be stupidly explicit * clearer renaming and flow * make partial state non-optional * update docstrings --------- Co-authored-by: Erik Johnston --- changelog.d/14675.misc | 1 + synapse/events/snapshot.py | 174 ++++++++++++++++++++++++- synapse/events/third_party_rules.py | 6 +- synapse/handlers/federation.py | 59 ++++++--- synapse/handlers/federation_event.py | 6 +- synapse/handlers/message.py | 42 +++--- synapse/state/__init__.py | 176 +++++++++++--------------- tests/handlers/test_user_directory.py | 4 +- tests/rest/admin/test_user.py | 4 +- tests/storage/test_redaction.py | 24 +++- tests/storage/test_state.py | 4 +- tests/test_utils/event_injection.py | 7 +- tests/test_visibility.py | 9 +- tests/utils.py | 5 +- 14 files changed, 359 insertions(+), 162 deletions(-) create mode 100644 changelog.d/14675.misc diff --git a/changelog.d/14675.misc b/changelog.d/14675.misc new file mode 100644 index 000000000000..bc1ac1c82aeb --- /dev/null +++ b/changelog.d/14675.misc @@ -0,0 +1 @@ +Add a class UnpersistedEventContext to allow for the batching up of storing state groups. diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 6eaef8b57a03..e0d82ad81cf9 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -11,6 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from abc import ABC, abstractmethod from typing import TYPE_CHECKING, List, Optional, Tuple import attr @@ -26,8 +27,51 @@ from synapse.types.state import StateFilter +class UnpersistedEventContextBase(ABC): + """ + This is a base class for EventContext and UnpersistedEventContext, objects which + hold information relevant to storing an associated event. Note that an + UnpersistedEventContexts must be converted into an EventContext before it is + suitable to send to the db with its associated event. + + Attributes: + _storage: storage controllers for interfacing with the database + app_service: If the associated event is being sent by a (local) application service, that + app service. + """ + + def __init__(self, storage_controller: "StorageControllers"): + self._storage: "StorageControllers" = storage_controller + self.app_service: Optional[ApplicationService] = None + + @abstractmethod + async def persist( + self, + event: EventBase, + ) -> "EventContext": + """ + A method to convert an UnpersistedEventContext to an EventContext, suitable for + sending to the database with the associated event. + """ + pass + + @abstractmethod + async def get_prev_state_ids( + self, state_filter: Optional["StateFilter"] = None + ) -> StateMap[str]: + """ + Gets the room state at the event (ie not including the event if the event is a + state event). 
+ + Args: + state_filter: specifies the type of state event to fetch from DB, example: + EventTypes.JoinRules + """ + pass + + @attr.s(slots=True, auto_attribs=True) -class EventContext: +class EventContext(UnpersistedEventContextBase): """ Holds information relevant to persisting an event @@ -77,9 +121,6 @@ class EventContext: delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group`` and ``state_group``. - app_service: If this event is being sent by a (local) application service, that - app service. - partial_state: if True, we may be storing this event with a temporary, incomplete state. """ @@ -122,6 +163,9 @@ def for_outlier( """Return an EventContext instance suitable for persisting an outlier event""" return EventContext(storage=storage) + async def persist(self, event: EventBase) -> "EventContext": + return self + async def serialize(self, event: EventBase, store: "DataStore") -> JsonDict: """Converts self to a type that can be serialized as JSON, and then deserialized by `deserialize` @@ -254,6 +298,128 @@ async def get_prev_state_ids( ) +@attr.s(slots=True, auto_attribs=True) +class UnpersistedEventContext(UnpersistedEventContextBase): + """ + The event context holds information about the state groups for an event. It is important + to remember that an event technically has two state groups: the state group before the + event, and the state group after the event. If the event is not a state event, the state + group will not change (ie the state group before the event will be the same as the state + group after the event), but if it is a state event the state group before the event + will differ from the state group after the event. + This is a version of an EventContext before the new state group (if any) has been + computed and stored. It contains information about the state before the event (which + also may be the information after the event, if the event is not a state event). The + UnpersistedEventContext must be converted into an EventContext by calling the method + 'persist' on it before it is suitable to be sent to the DB for processing. + + state_group_after_event: + The state group after the event. This will always be None until it is persisted. + If the event is not a state event, this will be the same as + state_group_before_event. + + state_group_before_event: + The ID of the state group representing the state of the room before this event. + + state_delta_due_to_event: + If the event is a state event, then this is the delta of the state between + `state_group` and `state_group_before_event` + + prev_group_for_state_group_before_event: + If it is known, ``state_group_before_event``'s previous state group. + + delta_ids_to_state_group_before_event: + If ``prev_group_for_state_group_before_event`` is not None, the state delta + between ``prev_group_for_state_group_before_event`` and ``state_group_before_event``. + + partial_state: + Whether the event has partial state. + + state_map_before_event: + A map of the state before the event, i.e. 
the state at `state_group_before_event` + """ + + _storage: "StorageControllers" + state_group_before_event: Optional[int] + state_group_after_event: Optional[int] + state_delta_due_to_event: Optional[dict] + prev_group_for_state_group_before_event: Optional[int] + delta_ids_to_state_group_before_event: Optional[StateMap[str]] + partial_state: bool + state_map_before_event: Optional[StateMap[str]] = None + + async def get_prev_state_ids( + self, state_filter: Optional["StateFilter"] = None + ) -> StateMap[str]: + """ + Gets the room state map, excluding this event. + + Args: + state_filter: specifies the type of state event to fetch from DB + + Returns: + Maps a (type, state_key) to the event ID of the state event matching + this tuple. + """ + if self.state_map_before_event: + return self.state_map_before_event + + assert self.state_group_before_event is not None + return await self._storage.state.get_state_ids_for_group( + self.state_group_before_event, state_filter + ) + + async def persist(self, event: EventBase) -> EventContext: + """ + Creates a full `EventContext` for the event, persisting any referenced state that + has not yet been persisted. + + Args: + event: event that the EventContext is associated with. + + Returns: An EventContext suitable for sending to the database with the event + for persisting + """ + assert self.partial_state is not None + + # If we have a full set of state for before the event but don't have a state + # group for that state, we need to get one + if self.state_group_before_event is None: + assert self.state_map_before_event + state_group_before_event = await self._storage.state.store_state_group( + event.event_id, + event.room_id, + prev_group=self.prev_group_for_state_group_before_event, + delta_ids=self.delta_ids_to_state_group_before_event, + current_state_ids=self.state_map_before_event, + ) + self.state_group_before_event = state_group_before_event + + # if the event isn't a state event the state group doesn't change + if not self.state_delta_due_to_event: + state_group_after_event = self.state_group_before_event + + # otherwise if it is a state event we need to get a state group for it + else: + state_group_after_event = await self._storage.state.store_state_group( + event.event_id, + event.room_id, + prev_group=self.state_group_before_event, + delta_ids=self.state_delta_due_to_event, + current_state_ids=None, + ) + + return EventContext.with_state( + storage=self._storage, + state_group=state_group_after_event, + state_group_before_event=self.state_group_before_event, + state_delta_due_to_event=self.state_delta_due_to_event, + partial_state=self.partial_state, + prev_group=self.state_group_before_event, + delta_ids=self.state_delta_due_to_event, + ) + + def _encode_state_dict( state_dict: Optional[StateMap[str]], ) -> Optional[List[Tuple[str, str, str]]]: diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 72ab69689887..97c61cc2586b 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -18,7 +18,7 @@ from synapse.api.errors import ModuleFailedException, SynapseError from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import UnpersistedEventContextBase from synapse.storage.roommember import ProfileInfo from synapse.types import Requester, StateMap from synapse.util.async_helpers import delay_cancellation, maybe_awaitable @@ -231,7 +231,9 @@ def register_third_party_rules_callbacks( 
self._on_threepid_bind_callbacks.append(on_threepid_bind) async def check_event_allowed( - self, event: EventBase, context: EventContext + self, + event: EventBase, + context: UnpersistedEventContextBase, ) -> Tuple[bool, Optional[dict]]: """Check if a provided event should be allowed in the given context. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 7f64130e0aa1..43ed4a3dd1f1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -56,7 +56,7 @@ from synapse.crypto.event_signing import compute_event_signature from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.events.validator import EventValidator from synapse.federation.federation_client import InvalidResponseError from synapse.http.servlet import assert_params_in_dict @@ -990,7 +990,10 @@ async def on_make_join_request( ) try: - event, context = await self.event_creation_handler.create_new_client_event( + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_new_client_event( builder=builder ) except SynapseError as e: @@ -998,7 +1001,9 @@ async def on_make_join_request( raise # Ensure the user can even join the room. - await self._federation_event_handler.check_join_restrictions(context, event) + await self._federation_event_handler.check_join_restrictions( + unpersisted_context, event + ) # The remote hasn't signed it yet, obviously. We'll do the full checks # when we get the event back in `on_send_join_request` @@ -1178,7 +1183,7 @@ async def on_make_leave_request( }, ) - event, context = await self.event_creation_handler.create_new_client_event( + event, _ = await self.event_creation_handler.create_new_client_event( builder=builder ) @@ -1228,12 +1233,13 @@ async def on_make_knock_request( }, ) - event, context = await self.event_creation_handler.create_new_client_event( - builder=builder - ) + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_new_client_event(builder=builder) event_allowed, _ = await self.third_party_event_rules.check_event_allowed( - event, context + event, unpersisted_context ) if not event_allowed: logger.warning("Creation of knock %s forbidden by third-party rules", event) @@ -1406,15 +1412,20 @@ async def exchange_third_party_invite( try: ( event, - context, + unpersisted_context, ) = await self.event_creation_handler.create_new_client_event( builder=builder ) - event, context = await self.add_display_name_to_third_party_invite( - room_version_obj, event_dict, event, context + ( + event, + unpersisted_context, + ) = await self.add_display_name_to_third_party_invite( + room_version_obj, event_dict, event, unpersisted_context ) + context = await unpersisted_context.persist(event) + EventValidator().validate_new(event, self.config) # We need to tell the transaction queue to send this out, even @@ -1483,14 +1494,19 @@ async def on_exchange_third_party_invite_request( try: ( event, - context, + unpersisted_context, ) = await self.event_creation_handler.create_new_client_event( builder=builder ) - event, context = await self.add_display_name_to_third_party_invite( - room_version_obj, event_dict, event, context + ( + event, + unpersisted_context, + ) = await self.add_display_name_to_third_party_invite( + room_version_obj, event_dict, event, unpersisted_context ) + context = await 
unpersisted_context.persist(event) + try: validate_event_for_room_version(event) await self._event_auth_handler.check_auth_rules_from_context(event) @@ -1522,8 +1538,8 @@ async def add_display_name_to_third_party_invite( room_version_obj: RoomVersion, event_dict: JsonDict, event: EventBase, - context: EventContext, - ) -> Tuple[EventBase, EventContext]: + context: UnpersistedEventContextBase, + ) -> Tuple[EventBase, UnpersistedEventContextBase]: key = ( EventTypes.ThirdPartyInvite, event.content["third_party_invite"]["signed"]["token"], @@ -1557,11 +1573,14 @@ async def add_display_name_to_third_party_invite( room_version_obj, event_dict ) EventValidator().validate_builder(builder) - event, context = await self.event_creation_handler.create_new_client_event( - builder=builder - ) + + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_new_client_event(builder=builder) + EventValidator().validate_new(event, self.config) - return event, context + return event, unpersisted_context async def _check_signature(self, event: EventBase, context: EventContext) -> None: """ diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index e037acbca2bf..3561f2f1de5e 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -58,7 +58,7 @@ validate_event_for_room_version, ) from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.federation.federation_client import InvalidResponseError, PulledPduInfo from synapse.logging.context import nested_logging_context from synapse.logging.opentracing import ( @@ -426,7 +426,9 @@ async def on_send_membership_event( return event, context async def check_join_restrictions( - self, context: EventContext, event: EventBase + self, + context: UnpersistedEventContextBase, + event: EventBase, ) -> None: """Check that restrictions in restricted join rules are matched diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 5f6da2943fc8..3e30f52e4d9f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -48,7 +48,7 @@ from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase, relation_from_event from synapse.events.builder import EventBuilder -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.events.utils import maybe_upsert_event_field from synapse.events.validator import EventValidator from synapse.handlers.directory import DirectoryHandler @@ -708,7 +708,7 @@ async def create_event( builder.internal_metadata.historical = historical - event, context = await self.create_new_client_event( + event, unpersisted_context = await self.create_new_client_event( builder=builder, requester=requester, allow_no_prev_events=allow_no_prev_events, @@ -721,6 +721,8 @@ async def create_event( current_state_group=current_state_group, ) + context = await unpersisted_context.persist(event) + # In an ideal world we wouldn't need the second part of this condition. However, # this behaviour isn't spec'd yet, meaning we should be able to deactivate this # behaviour. 
Another reason is that this code is also evaluated each time a new @@ -1083,13 +1085,14 @@ async def create_new_client_event( state_map: Optional[StateMap[str]] = None, for_batch: bool = False, current_state_group: Optional[int] = None, - ) -> Tuple[EventBase, EventContext]: + ) -> Tuple[EventBase, UnpersistedEventContextBase]: """Create a new event for a local client. If bool for_batch is true, will create an event using the prev_event_ids, and will create an event context for the event using the parameters state_map and current_state_group, thus these parameters must be provided in this case if for_batch is True. The subsequently created event and context are suitable for being batched up and bulk persisted to the database - with other similarly created events. + with other similarly created events. Note that this returns an UnpersistedEventContext, + which must be converted to an EventContext before it can be sent to the DB. Args: builder: @@ -1131,7 +1134,7 @@ async def create_new_client_event( batch persisting Returns: - Tuple of created event, context + Tuple of created event, UnpersistedEventContext """ # Strip down the state_event_ids to only what we need to auth the event. # For example, we don't need extra m.room.member that don't match event.sender @@ -1192,9 +1195,16 @@ async def create_new_client_event( event = await builder.build( prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth ) - context = await self.state.compute_event_context_for_batched( - event, state_map, current_state_group + + context: UnpersistedEventContextBase = ( + await self.state.calculate_context_info( + event, + state_ids_before_event=state_map, + partial_state=False, + state_group_before_event=current_state_group, + ) ) + else: event = await builder.build( prev_event_ids=prev_event_ids, @@ -1244,16 +1254,17 @@ async def create_new_client_event( state_map_for_event[(data.event_type, data.state_key)] = state_id - context = await self.state.compute_event_context( + # TODO(faster_joins): check how MSC2716 works and whether we can have + # partial state here + # https://github.com/matrix-org/synapse/issues/13003 + context = await self.state.calculate_context_info( event, state_ids_before_event=state_map_for_event, - # TODO(faster_joins): check how MSC2716 works and whether we can have - # partial state here - # https://github.com/matrix-org/synapse/issues/13003 partial_state=False, ) + else: - context = await self.state.compute_event_context(event) + context = await self.state.calculate_context_info(event) if requester: context.app_service = requester.app_service @@ -2082,9 +2093,9 @@ def _expire_rooms_to_exclude_from_dummy_event_insertion(self) -> None: async def _rebuild_event_after_third_party_rules( self, third_party_result: dict, original_event: EventBase - ) -> Tuple[EventBase, EventContext]: + ) -> Tuple[EventBase, UnpersistedEventContextBase]: # the third_party_event_rules want to replace the event. - # we do some basic checks, and then return the replacement event and context. + # we do some basic checks, and then return the replacement event. # Construct a new EventBuilder and validate it, which helps with the # rest of these checks. @@ -2138,5 +2149,6 @@ async def _rebuild_event_after_third_party_rules( # we rebuild the event context, to be on the safe side. If nothing else, # delta_ids might need an update. 
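The pattern introduced throughout this handler, and repeated in the test updates further down, is create-then-persist: event creation now hands back an UnpersistedEventContextBase, and the caller converts it with persist(event) only when the event is actually about to be stored. A minimal sketch of that flow (not part of the diff; handler, persistence and builder are stand-ins for the event creation handler, the storage persistence controller and an event builder):

from typing import Any, Tuple

async def create_and_persist(handler: Any, persistence: Any, builder: Any) -> Tuple[Any, Any]:
    # Creation now returns an *unpersisted* context alongside the event.
    event, unpersisted_context = await handler.create_new_client_event(builder=builder)
    # Converting it stores any state groups that were only described so far.
    context = await unpersisted_context.persist(event)
    # Only the full EventContext is suitable for the persistence layer.
    await persistence.persist_event(event, context)
    return event, context

Deferring the conversion in this way is what leaves room for batching up the storing of state groups, per the changelog entry above.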
- context = await self.state.compute_event_context(event) + context = await self.state.calculate_context_info(event) + return event, context diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index fdfb46ab82ad..e877e6f1a12b 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -39,7 +39,11 @@ from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import ( + EventContext, + UnpersistedEventContext, + UnpersistedEventContextBase, +) from synapse.logging.context import ContextResourceUsage from synapse.replication.http.state import ReplicationUpdateCurrentStateRestServlet from synapse.state import v1, v2 @@ -262,31 +266,31 @@ async def get_hosts_in_room_at_events( state = await entry.get_state(self._state_storage_controller, StateFilter.all()) return await self.store.get_joined_hosts(room_id, state, entry) - async def compute_event_context( + async def calculate_context_info( self, event: EventBase, state_ids_before_event: Optional[StateMap[str]] = None, partial_state: Optional[bool] = None, - ) -> EventContext: - """Build an EventContext structure for a non-outlier event. - - (for an outlier, call EventContext.for_outlier directly) - - This works out what the current state should be for the event, and - generates a new state group if necessary. - - Args: - event: - state_ids_before_event: The event ids of the state before the event if - it can't be calculated from existing events. This is normally - only specified when receiving an event from federation where we - don't have the prev events, e.g. when backfilling. - partial_state: - `True` if `state_ids_before_event` is partial and omits non-critical - membership events. - `False` if `state_ids_before_event` is the full state. - `None` when `state_ids_before_event` is not provided. In this case, the - flag will be calculated based on `event`'s prev events. + state_group_before_event: Optional[int] = None, + ) -> UnpersistedEventContextBase: + """ + Calulates the contents of an unpersisted event context, other than the current + state group (which is either provided or calculated when the event context is persisted) + + state_ids_before_event: + The event ids of the full state before the event if + it can't be calculated from existing events. This is normally + only specified when receiving an event from federation where we + don't have the prev events, e.g. when backfilling or when the event + is being created for batch persisting. + partial_state: + `True` if `state_ids_before_event` is partial and omits non-critical + membership events. + `False` if `state_ids_before_event` is the full state. + `None` when `state_ids_before_event` is not provided. In this case, the + flag will be calculated based on `event`'s prev events. + state_group_before_event: + the current state group at the time of event, if known Returns: The event context. @@ -294,7 +298,6 @@ async def compute_event_context( RuntimeError if `state_ids_before_event` is not provided and one or more prev events are missing or outliers. """ - assert not event.internal_metadata.is_outlier() # @@ -306,17 +309,6 @@ async def compute_event_context( state_group_before_event_prev_group = None deltas_to_state_group_before_event = None - # .. though we need to get a state group for it. 
- state_group_before_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=None, - delta_ids=None, - current_state_ids=state_ids_before_event, - ) - ) - # the partial_state flag must be provided assert partial_state is not None else: @@ -345,6 +337,7 @@ async def compute_event_context( logger.debug("calling resolve_state_groups from compute_event_context") # we've already taken into account partial state, so no need to wait for # complete state here. + entry = await self.resolve_state_groups_for_events( event.room_id, event.prev_event_ids(), @@ -383,18 +376,19 @@ async def compute_event_context( # if not event.is_state(): - return EventContext.with_state( + return UnpersistedEventContext( storage=self._storage_controllers, state_group_before_event=state_group_before_event, - state_group=state_group_before_event, + state_group_after_event=state_group_before_event, state_delta_due_to_event={}, - prev_group=state_group_before_event_prev_group, - delta_ids=deltas_to_state_group_before_event, + prev_group_for_state_group_before_event=state_group_before_event_prev_group, + delta_ids_to_state_group_before_event=deltas_to_state_group_before_event, partial_state=partial_state, + state_map_before_event=state_ids_before_event, ) # - # otherwise, we'll need to create a new state group for after the event + # otherwise, we'll need to set up creating a new state group for after the event # key = (event.type, event.state_key) @@ -412,88 +406,60 @@ async def compute_event_context( delta_ids = {key: event.event_id} - state_group_after_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=state_group_before_event, - delta_ids=delta_ids, - current_state_ids=None, - ) - ) - - return EventContext.with_state( + return UnpersistedEventContext( storage=self._storage_controllers, - state_group=state_group_after_event, state_group_before_event=state_group_before_event, + state_group_after_event=None, state_delta_due_to_event=delta_ids, - prev_group=state_group_before_event, - delta_ids=delta_ids, + prev_group_for_state_group_before_event=state_group_before_event_prev_group, + delta_ids_to_state_group_before_event=deltas_to_state_group_before_event, partial_state=partial_state, + state_map_before_event=state_ids_before_event, ) - async def compute_event_context_for_batched( + async def compute_event_context( self, event: EventBase, - state_ids_before_event: StateMap[str], - current_state_group: int, + state_ids_before_event: Optional[StateMap[str]] = None, + partial_state: Optional[bool] = None, ) -> EventContext: - """ - Generate an event context for an event that has not yet been persisted to the - database. Intended for use with events that are created to be persisted in a batch. - Args: - event: the event the context is being computed for - state_ids_before_event: a state map consisting of the state ids of the events - created prior to this event. - current_state_group: the current state group before the event. 
- """ - state_group_before_event_prev_group = None - deltas_to_state_group_before_event = None - - state_group_before_event = current_state_group - - # if the event is not state, we are set - if not event.is_state(): - return EventContext.with_state( - storage=self._storage_controllers, - state_group_before_event=state_group_before_event, - state_group=state_group_before_event, - state_delta_due_to_event={}, - prev_group=state_group_before_event_prev_group, - delta_ids=deltas_to_state_group_before_event, - partial_state=False, - ) + """Build an EventContext structure for a non-outlier event. - # otherwise, we'll need to create a new state group for after the event - key = (event.type, event.state_key) + (for an outlier, call EventContext.for_outlier directly) - if state_ids_before_event is not None: - replaces = state_ids_before_event.get(key) + This works out what the current state should be for the event, and + generates a new state group if necessary. - if replaces and replaces != event.event_id: - event.unsigned["replaces_state"] = replaces + Args: + event: + state_ids_before_event: The event ids of the state before the event if + it can't be calculated from existing events. This is normally + only specified when receiving an event from federation where we + don't have the prev events, e.g. when backfilling. + partial_state: + `True` if `state_ids_before_event` is partial and omits non-critical + membership events. + `False` if `state_ids_before_event` is the full state. + `None` when `state_ids_before_event` is not provided. In this case, the + flag will be calculated based on `event`'s prev events. + entry: + A state cache entry for the resolved state across the prev events. We may + have already calculated this, so if it's available pass it in + Returns: + The event context. - delta_ids = {key: event.event_id} + Raises: + RuntimeError if `state_ids_before_event` is not provided and one or more + prev events are missing or outliers. 
+ """ - state_group_after_event = ( - await self._state_storage_controller.store_state_group( - event.event_id, - event.room_id, - prev_group=state_group_before_event, - delta_ids=delta_ids, - current_state_ids=None, - ) + unpersisted_context = await self.calculate_context_info( + event=event, + state_ids_before_event=state_ids_before_event, + partial_state=partial_state, ) - return EventContext.with_state( - storage=self._storage_controllers, - state_group=state_group_after_event, - state_group_before_event=state_group_before_event, - state_delta_due_to_event=delta_ids, - prev_group=state_group_before_event, - delta_ids=delta_ids, - partial_state=False, - ) + return await unpersisted_context.persist(event) @measure_func() async def resolve_state_groups_for_events( diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index 75fc5a17a47b..e9be5fb504f8 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -949,10 +949,12 @@ def _add_user_to_room( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success( self.hs.get_storage_controllers().persistence.persist_event(event, context) ) diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 5c1ced355ff1..b50406e12929 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -2934,10 +2934,12 @@ def test_get_rooms_with_nonlocal_user(self) -> None: }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(storage_controllers.persistence.persist_event(event, context)) # Now get rooms diff --git a/tests/storage/test_redaction.py b/tests/storage/test_redaction.py index df4740f9d9f2..0100f7da14c6 100644 --- a/tests/storage/test_redaction.py +++ b/tests/storage/test_redaction.py @@ -74,10 +74,12 @@ def inject_room_member( # type: ignore[override] }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(self._persistence.persist_event(event, context)) return event @@ -96,10 +98,12 @@ def inject_message(self, room: RoomID, user: UserID, body: str) -> EventBase: }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(self._persistence.persist_event(event, context)) return event @@ -119,10 +123,12 @@ def inject_redaction( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + self.get_success(self._persistence.persist_event(event, context)) return event @@ -259,7 +265,7 @@ def type(self) -> str: def internal_metadata(self) -> _EventInternalMetadata: return self._base_builder.internal_metadata - event_1, context_1 = self.get_success( + event_1, unpersisted_context_1 = self.get_success( self.event_creation_handler.create_new_client_event( cast( EventBuilder, @@ 
-280,9 +286,11 @@ def internal_metadata(self) -> _EventInternalMetadata: ) ) + context_1 = self.get_success(unpersisted_context_1.persist(event_1)) + self.get_success(self._persistence.persist_event(event_1, context_1)) - event_2, context_2 = self.get_success( + event_2, unpersisted_context_2 = self.get_success( self.event_creation_handler.create_new_client_event( cast( EventBuilder, @@ -302,6 +310,8 @@ def internal_metadata(self) -> _EventInternalMetadata: ) ) ) + + context_2 = self.get_success(unpersisted_context_2.persist(event_2)) self.get_success(self._persistence.persist_event(event_2, context_2)) # fetch one of the redactions @@ -421,10 +431,12 @@ def test_store_redacted_redaction(self) -> None: }, ) - redaction_event, context = self.get_success( + redaction_event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(redaction_event)) + self.get_success(self._persistence.persist_event(redaction_event, context)) # Now lets jump to the future where we have censored the redaction event diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index bad7f0bc6026..f730b888f7d2 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -67,10 +67,12 @@ def inject_state_event( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) + assert self.storage.persistence is not None self.get_success(self.storage.persistence.persist_event(event, context)) diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index 1a50c2acf12a..a6330ed840f0 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -92,8 +92,13 @@ async def create_event( builder = hs.get_event_builder_factory().for_room_version( KNOWN_ROOM_VERSIONS[room_version], kwargs ) - event, context = await hs.get_event_creation_handler().create_new_client_event( + ( + event, + unpersisted_context, + ) = await hs.get_event_creation_handler().create_new_client_event( builder, prev_event_ids=prev_event_ids ) + context = await unpersisted_context.persist(event) + return event, context diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 875e37988f5d..36d6b37aa421 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -175,9 +175,10 @@ def _inject_visibility(self, user_id: str, visibility: str) -> EventBase: }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( self._storage_controllers.persistence.persist_event(event, context) ) @@ -202,9 +203,10 @@ def _inject_room_member( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( self._storage_controllers.persistence.persist_event(event, context) @@ -226,9 +228,10 @@ def _inject_message( }, ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_new_client_event(builder) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( 
self._storage_controllers.persistence.persist_event(event, context) diff --git a/tests/utils.py b/tests/utils.py index d76bf9716ad2..15fabbc2d01e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -335,6 +335,9 @@ async def create_room(hs: HomeServer, room_id: str, creator_id: str) -> None: }, ) - event, context = await event_creation_handler.create_new_client_event(builder) + event, unpersisted_context = await event_creation_handler.create_new_client_event( + builder + ) + context = await unpersisted_context.persist(event) await persistence_store.persist_event(event, context) From a5a799722db0c33dc61fb2c6c7282ff7e82eb2e9 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 9 Feb 2023 22:33:39 +0000 Subject: [PATCH 165/344] Tag federation request spans with the worker name (#15042) * Systematically include worker name as process info * Changelog * don't bother with inner setdefault --- changelog.d/15042.feature | 1 + synapse/api/auth.py | 7 ------- synapse/logging/opentracing.py | 10 +++++++++- 3 files changed, 10 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15042.feature diff --git a/changelog.d/15042.feature b/changelog.d/15042.feature new file mode 100644 index 000000000000..7a4de89f0070 --- /dev/null +++ b/changelog.d/15042.feature @@ -0,0 +1 @@ +Tag opentracing spans for federation requests with the name of the worker serving the request. diff --git a/synapse/api/auth.py b/synapse/api/auth.py index 3d7f986ac74a..66e869bc2db2 100644 --- a/synapse/api/auth.py +++ b/synapse/api/auth.py @@ -32,7 +32,6 @@ from synapse.http import get_request_user_agent from synapse.http.site import SynapseRequest from synapse.logging.opentracing import ( - SynapseTags, active_span, force_tracing, start_active_span, @@ -162,12 +161,6 @@ async def get_user_by_req( parent_span.set_tag( "authenticated_entity", requester.authenticated_entity ) - # We tag the Synapse instance name so that it's an easy jumping - # off point into the logs. Can also be used to filter for an - # instance that is under load. - parent_span.set_tag( - SynapseTags.INSTANCE_NAME, self.hs.get_instance_name() - ) parent_span.set_tag("user_id", requester.user.to_string()) if requester.device_id is not None: parent_span.set_tag("device_id", requester.device_id) diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 8ef9a0dda8e5..6c7cf1b29487 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -466,8 +466,16 @@ def init_tracer(hs: "HomeServer") -> None: STRIP_INSTANCE_NUMBER_SUFFIX_REGEX, "", hs.get_instance_name() ) + jaeger_config = hs.config.tracing.jaeger_config + tags = jaeger_config.setdefault("tags", {}) + + # tag the Synapse instance name so that it's an easy jumping + # off point into the logs. Can also be used to filter for an + # instance that is under load. + tags[SynapseTags.INSTANCE_NAME] = hs.get_instance_name() + config = JaegerConfig( - config=hs.config.tracing.jaeger_config, + config=jaeger_config, service_name=f"{hs.config.server.server_name} {instance_name_by_type}", scope_manager=LogContextScopeManager(), metrics_factory=PrometheusMetricsFactory(), From fd296b7343f2e557519f1ec81325ad836bcbdbf9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 10 Feb 2023 10:52:35 +0100 Subject: [PATCH 166/344] Fix exception on start up about device lists (#15041) Fixes #15010. 
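The change below registers one more (table, column) pair with the device-list stream ID generator. Roughly, such a generator seeds its current position from the highest stream_id recorded in every table it is told about, so leaving a table out can make it start behind the true position and fail at start up. An illustrative, simplified sketch of that seeding step (this is not Synapse's actual generator; the sqlite usage and the table names passed in are assumptions):

import sqlite3
from typing import Iterable, Tuple

def seed_stream_position(conn: sqlite3.Connection, tables: Iterable[Tuple[str, str]]) -> int:
    # Take the maximum recorded position across all tables that track this stream.
    position = 0
    for table, column in tables:
        row = conn.execute(f"SELECT COALESCE(MAX({column}), 0) FROM {table}").fetchone()
        position = max(position, row[0])
    return position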
--- changelog.d/15041.misc | 1 + synapse/storage/databases/main/devices.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/15041.misc diff --git a/changelog.d/15041.misc b/changelog.d/15041.misc new file mode 100644 index 000000000000..d602b0043a85 --- /dev/null +++ b/changelog.d/15041.misc @@ -0,0 +1 @@ +Fix a rare exception in logs on start up. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index e8b6cc6b8014..766c2052fbda 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -100,6 +100,7 @@ def __init__( ("device_lists_outbound_pokes", "stream_id"), ("device_lists_changes_in_room", "stream_id"), ("device_lists_remote_pending", "stream_id"), + ("device_lists_changes_converted_stream_position", "stream_id"), ], is_writer=hs.config.worker.worker_app is None, ) From 73b8068ced20038ecffc979a043d77587d50941c Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 10 Feb 2023 12:44:38 +0000 Subject: [PATCH 167/344] 1.77.0rc2 --- CHANGES.md | 15 +++++++++++++++ changelog.d/15024.bugfix | 1 - changelog.d/15036.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 5 files changed, 22 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/15024.bugfix delete mode 100644 changelog.d/15036.misc diff --git a/CHANGES.md b/CHANGES.md index d6bcacef5f32..a2cb957f1695 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,18 @@ +Synapse 1.77.0rc2 (2023-02-10) +============================== + +Bugfixes +-------- + +- Fix bug where retried replication requests would return a failure. Introduced in v1.76.0. ([\#15024](https://github.com/matrix-org/synapse/issues/15024)) + + +Internal Changes +---------------- + +- Prepare for future database schema changes. ([\#15036](https://github.com/matrix-org/synapse/issues/15036)) + + Synapse 1.77.0rc1 (2023-02-07) ============================== diff --git a/changelog.d/15024.bugfix b/changelog.d/15024.bugfix deleted file mode 100644 index dddd406322ee..000000000000 --- a/changelog.d/15024.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where retried replication requests would return a failure. Introduced in v1.76.0. diff --git a/changelog.d/15036.misc b/changelog.d/15036.misc deleted file mode 100644 index b0adc9c9d1d6..000000000000 --- a/changelog.d/15036.misc +++ /dev/null @@ -1 +0,0 @@ -Prepare for future database schema changes. diff --git a/debian/changelog b/debian/changelog index becc41c1217a..461953742bac 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.77.0~rc2) stable; urgency=medium + + * New Synapse release 1.77.0rc2. + + -- Synapse Packaging team Fri, 10 Feb 2023 12:44:21 +0000 + matrix-synapse-py3 (1.77.0~rc1) stable; urgency=medium * New Synapse release 1.77.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 1e59706104ad..921a1fccbc94 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.77.0rc1" +version = "1.77.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From a481fb9f98ad10e5e129bdc7664c59498a7332f6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 10 Feb 2023 08:09:47 -0500 Subject: [PATCH 168/344] Refactor get_user_devices_from_cache to avoid mutating cached values. 
(#15040) The previous version of the code could mutate a cached value, but only if the input requested all devices of a user *and* a specific device. To avoid this nonsensical situation we no longer fetch a specific device ID if all of a user's devices are returned. --- changelog.d/15040.misc | 1 + synapse/handlers/e2e_keys.py | 11 +++++--- synapse/storage/databases/main/devices.py | 31 +++++++++++++---------- 3 files changed, 25 insertions(+), 18 deletions(-) create mode 100644 changelog.d/15040.misc diff --git a/changelog.d/15040.misc b/changelog.d/15040.misc new file mode 100644 index 000000000000..ca129b64afc5 --- /dev/null +++ b/changelog.d/15040.misc @@ -0,0 +1 @@ +Avoid mutating a cached value in `get_user_devices_from_cache`. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index d2188ca08f82..43cbece21b69 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -159,19 +159,22 @@ async def query_devices( # A map of destination -> user ID -> device IDs. remote_queries_not_in_cache: Dict[str, Dict[str, Iterable[str]]] = {} if remote_queries: - query_list: List[Tuple[str, Optional[str]]] = [] + user_ids = set() + user_and_device_ids: List[Tuple[str, str]] = [] for user_id, device_ids in remote_queries.items(): if device_ids: - query_list.extend( + user_and_device_ids.extend( (user_id, device_id) for device_id in device_ids ) else: - query_list.append((user_id, None)) + user_ids.add(user_id) ( user_ids_not_in_cache, remote_results, - ) = await self.store.get_user_devices_from_cache(query_list) + ) = await self.store.get_user_devices_from_cache( + user_ids, user_and_device_ids + ) # Check that the homeserver still shares a room with all cached users. # Note that this check may be slightly racy when a remote user leaves a diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 766c2052fbda..85c1778a81ce 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -746,42 +746,45 @@ def _add_user_signature_change_txn( @trace @cancellable async def get_user_devices_from_cache( - self, query_list: List[Tuple[str, Optional[str]]] + self, user_ids: Set[str], user_and_device_ids: List[Tuple[str, str]] ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]: """Get the devices (and keys if any) for remote users from the cache. Args: - query_list: List of (user_id, device_ids), if device_ids is - falsey then return all device ids for that user. + user_ids: users which should have all device IDs returned + user_and_device_ids: List of (user_id, device_ids) Returns: A tuple of (user_ids_not_in_cache, results_map), where user_ids_not_in_cache is a set of user_ids and results_map is a mapping of user_id -> device_id -> device_info. """ - user_ids = {user_id for user_id, _ in query_list} - user_map = await self.get_device_list_last_stream_id_for_remotes(list(user_ids)) + unique_user_ids = user_ids | {user_id for user_id, _ in user_and_device_ids} + user_map = await self.get_device_list_last_stream_id_for_remotes( + list(unique_user_ids) + ) # We go and check if any of the users need to have their device lists # resynced. If they do then we remove them from the cached list. 
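The mutation could only happen when a single request mixed "all devices of a user" with "one specific device of that same user". The e2e_keys change above rules that out by partitioning the query before it reaches this store method; a self-contained sketch of that partitioning (the function name and generic typing are mine, the logic mirrors the handler diff):

from typing import Dict, Iterable, List, Set, Tuple

def split_device_query(
    remote_queries: Dict[str, Iterable[str]]
) -> Tuple[Set[str], List[Tuple[str, str]]]:
    # Users with an empty device list get all of their devices returned ...
    user_ids: Set[str] = set()
    # ... everyone else is queried per (user_id, device_id) pair.
    user_and_device_ids: List[Tuple[str, str]] = []
    for user_id, device_ids in remote_queries.items():
        if device_ids:
            user_and_device_ids.extend((user_id, device_id) for device_id in device_ids)
        else:
            user_ids.add(user_id)
    return user_ids, user_and_device_ids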
users_needing_resync = await self.get_user_ids_requiring_device_list_resync( - user_ids + unique_user_ids ) user_ids_in_cache = { user_id for user_id, stream_id in user_map.items() if stream_id } - users_needing_resync - user_ids_not_in_cache = user_ids - user_ids_in_cache + user_ids_not_in_cache = unique_user_ids - user_ids_in_cache + # First fetch all the users which all devices are to be returned. results: Dict[str, Dict[str, JsonDict]] = {} - for user_id, device_id in query_list: - if user_id not in user_ids_in_cache: - continue - - if device_id: + for user_id in user_ids: + if user_id in user_ids_in_cache: + results[user_id] = await self.get_cached_devices_for_user(user_id) + # Then fetch all device-specific requests, but skip users we've already + # fetched all devices for. + for user_id, device_id in user_and_device_ids: + if user_id in user_ids_in_cache and user_id not in user_ids: device = await self._get_cached_user_device(user_id, device_id) results.setdefault(user_id, {})[device_id] = device - else: - results[user_id] = await self.get_cached_devices_for_user(user_id) set_tag("in_cache", str(results)) set_tag("not_in_cache", str(user_ids_not_in_cache)) From b95407908dfde97e483952722b6fa7a533ff5093 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 10 Feb 2023 13:11:20 +0000 Subject: [PATCH 169/344] Avoid mutating cached values in `_generate_sync_entry_for_account_data` (#15047) --- changelog.d/15047.misc | 1 + synapse/handlers/sync.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/15047.misc diff --git a/changelog.d/15047.misc b/changelog.d/15047.misc new file mode 100644 index 000000000000..561dc874de0e --- /dev/null +++ b/changelog.d/15047.misc @@ -0,0 +1 @@ +Avoid mutating cached values in `_generate_sync_entry_for_account_data`. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3566537894e6..202b35eee6d3 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1753,6 +1753,7 @@ async def _generate_sync_entry_for_account_data( ) if push_rules_changed: + global_account_data = dict(global_account_data) global_account_data["m.push_rules"] = await self.push_rules_for_user( sync_config.user ) @@ -1763,6 +1764,7 @@ async def _generate_sync_entry_for_account_data( account_data_by_room, ) = await self.store.get_account_data_for_user(sync_config.user.to_string()) + global_account_data = dict(global_account_data) global_account_data["m.push_rules"] = await self.push_rules_for_user( sync_config.user ) From cf5233b783273efc84b991e7242fb4761ccc201a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 10 Feb 2023 09:22:16 -0500 Subject: [PATCH 170/344] Avoid fetching unused account data in sync. (#14973) The per-room account data is no longer unconditionally fetched, even if all rooms will be filtered out. Global account data will not be fetched if it will all be filtered out. 
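A concrete case this helps is a /sync request whose filter drops account data entirely; previously the server still ran the account-data queries and then threw the results away. A hedged illustration of such a filter, using the client-server filtering API (the values are examples, not taken from this diff; an empty "types" list matches nothing):

# With a filter like this, both of the new blocks_all_*_account_data checks
# added below return True and the corresponding queries are skipped.
sync_filter = {
    "account_data": {"types": []},            # no global account data
    "room": {"account_data": {"types": []}},  # no per-room account data
}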
--- changelog.d/14973.misc | 1 + synapse/api/filtering.py | 30 ++++- synapse/handlers/account_data.py | 10 +- synapse/handlers/initial_sync.py | 5 +- synapse/handlers/room_member.py | 2 +- synapse/handlers/sync.py | 88 ++++++------ synapse/rest/admin/users.py | 3 +- .../storage/databases/main/account_data.py | 127 +++++++++++++----- 8 files changed, 176 insertions(+), 90 deletions(-) create mode 100644 changelog.d/14973.misc diff --git a/changelog.d/14973.misc b/changelog.d/14973.misc new file mode 100644 index 000000000000..365762360285 --- /dev/null +++ b/changelog.d/14973.misc @@ -0,0 +1 @@ +Improve performance of `/sync` in a few situations. diff --git a/synapse/api/filtering.py b/synapse/api/filtering.py index 83c42fc25a22..b9f432cc234a 100644 --- a/synapse/api/filtering.py +++ b/synapse/api/filtering.py @@ -219,9 +219,13 @@ def __init__(self, hs: "HomeServer", filter_json: JsonDict): self._room_timeline_filter = Filter(hs, room_filter_json.get("timeline", {})) self._room_state_filter = Filter(hs, room_filter_json.get("state", {})) self._room_ephemeral_filter = Filter(hs, room_filter_json.get("ephemeral", {})) - self._room_account_data = Filter(hs, room_filter_json.get("account_data", {})) + self._room_account_data_filter = Filter( + hs, room_filter_json.get("account_data", {}) + ) self._presence_filter = Filter(hs, filter_json.get("presence", {})) - self._account_data = Filter(hs, filter_json.get("account_data", {})) + self._global_account_data_filter = Filter( + hs, filter_json.get("account_data", {}) + ) self.include_leave = filter_json.get("room", {}).get("include_leave", False) self.event_fields = filter_json.get("event_fields", []) @@ -256,8 +260,10 @@ async def filter_presence( ) -> List[UserPresenceState]: return await self._presence_filter.filter(presence_states) - async def filter_account_data(self, events: Iterable[JsonDict]) -> List[JsonDict]: - return await self._account_data.filter(events) + async def filter_global_account_data( + self, events: Iterable[JsonDict] + ) -> List[JsonDict]: + return await self._global_account_data_filter.filter(events) async def filter_room_state(self, events: Iterable[EventBase]) -> List[EventBase]: return await self._room_state_filter.filter( @@ -279,7 +285,7 @@ async def filter_room_ephemeral(self, events: Iterable[JsonDict]) -> List[JsonDi async def filter_room_account_data( self, events: Iterable[JsonDict] ) -> List[JsonDict]: - return await self._room_account_data.filter( + return await self._room_account_data_filter.filter( await self._room_filter.filter(events) ) @@ -292,6 +298,13 @@ def blocks_all_presence(self) -> bool: or self._presence_filter.filters_all_senders() ) + def blocks_all_global_account_data(self) -> bool: + """True if all global acount data will be filtered out.""" + return ( + self._global_account_data_filter.filters_all_types() + or self._global_account_data_filter.filters_all_senders() + ) + def blocks_all_room_ephemeral(self) -> bool: return ( self._room_ephemeral_filter.filters_all_types() @@ -299,6 +312,13 @@ def blocks_all_room_ephemeral(self) -> bool: or self._room_ephemeral_filter.filters_all_rooms() ) + def blocks_all_room_account_data(self) -> bool: + return ( + self._room_account_data_filter.filters_all_types() + or self._room_account_data_filter.filters_all_senders() + or self._room_account_data_filter.filters_all_rooms() + ) + def blocks_all_room_timeline(self) -> bool: return ( self._room_timeline_filter.filters_all_types() diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py 
index 67e789eef70e..797de46dbc3b 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -343,10 +343,12 @@ async def get_new_events( } ) - ( - account_data, - room_account_data, - ) = await self.store.get_updated_account_data_for_user(user_id, last_stream_id) + account_data = await self.store.get_updated_global_account_data_for_user( + user_id, last_stream_id + ) + room_account_data = await self.store.get_updated_room_account_data_for_user( + user_id, last_stream_id + ) for account_data_type, content in account_data.items(): results.append({"type": account_data_type, "content": content}) diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 191529bd8e27..1a29abde9839 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -154,9 +154,8 @@ async def _snapshot_all_rooms( tags_by_room = await self.store.get_tags_for_user(user_id) - account_data, account_data_by_room = await self.store.get_account_data_for_user( - user_id - ) + account_data = await self.store.get_global_account_data_for_user(user_id) + account_data_by_room = await self.store.get_room_account_data_for_user(user_id) public_room_ids = await self.store.get_public_room_ids() diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d236cc09b526..6e7141d2ef53 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -484,7 +484,7 @@ async def copy_room_tags_and_direct_to_room( user_id: The user's ID. """ # Retrieve user account data for predecessor room - user_account_data, _ = await self.store.get_account_data_for_user(user_id) + user_account_data = await self.store.get_global_account_data_for_user(user_id) # Copy direct message state if applicable direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {}) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 202b35eee6d3..399685e5b7d5 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1444,9 +1444,9 @@ async def generate_sync_result( logger.debug("Fetching account data") - account_data_by_room = await self._generate_sync_entry_for_account_data( - sync_result_builder - ) + # Global account data is included if it is not filtered out. + if not sync_config.filter_collection.blocks_all_global_account_data(): + await self._generate_sync_entry_for_account_data(sync_result_builder) # Presence data is included if the server has it enabled and not filtered out. include_presence_data = bool( @@ -1472,9 +1472,7 @@ async def generate_sync_result( ( newly_joined_rooms, newly_left_rooms, - ) = await self._generate_sync_entry_for_rooms( - sync_result_builder, account_data_by_room - ) + ) = await self._generate_sync_entry_for_rooms(sync_result_builder) # Work out which users have joined or left rooms we're in. We use this # to build the presence and device_list parts of the sync response in @@ -1717,35 +1715,29 @@ async def _generate_sync_entry_for_to_device( async def _generate_sync_entry_for_account_data( self, sync_result_builder: "SyncResultBuilder" - ) -> Dict[str, Dict[str, JsonDict]]: - """Generates the account data portion of the sync response. + ) -> None: + """Generates the global account data portion of the sync response. Account data (called "Client Config" in the spec) can be set either globally or for a specific room. Account data consists of a list of events which accumulate state, much like a room. - This function retrieves global and per-room account data. 
The former is written - to the given `sync_result_builder`. The latter is returned directly, to be - later written to the `sync_result_builder` on a room-by-room basis. + This function retrieves global account data and writes it to the given + `sync_result_builder`. See `_generate_sync_entry_for_rooms` for handling + of per-room account data. Args: sync_result_builder - - Returns: - A dictionary whose keys (room ids) map to the per room account data for that - room. """ sync_config = sync_result_builder.sync_config user_id = sync_result_builder.sync_config.user.to_string() since_token = sync_result_builder.since_token if since_token and not sync_result_builder.full_state: - # TODO Do not fetch room account data if it will be unused. - ( - global_account_data, - account_data_by_room, - ) = await self.store.get_updated_account_data_for_user( - user_id, since_token.account_data_key + global_account_data = ( + await self.store.get_updated_global_account_data_for_user( + user_id, since_token.account_data_key + ) ) push_rules_changed = await self.store.have_push_rules_changed_for_user( @@ -1758,28 +1750,26 @@ async def _generate_sync_entry_for_account_data( sync_config.user ) else: - # TODO Do not fetch room account data if it will be unused. - ( - global_account_data, - account_data_by_room, - ) = await self.store.get_account_data_for_user(sync_config.user.to_string()) + all_global_account_data = await self.store.get_global_account_data_for_user( + user_id + ) - global_account_data = dict(global_account_data) + global_account_data = dict(all_global_account_data) global_account_data["m.push_rules"] = await self.push_rules_for_user( sync_config.user ) - account_data_for_user = await sync_config.filter_collection.filter_account_data( - [ - {"type": account_data_type, "content": content} - for account_data_type, content in global_account_data.items() - ] + account_data_for_user = ( + await sync_config.filter_collection.filter_global_account_data( + [ + {"type": account_data_type, "content": content} + for account_data_type, content in global_account_data.items() + ] + ) ) sync_result_builder.account_data = account_data_for_user - return account_data_by_room - async def _generate_sync_entry_for_presence( self, sync_result_builder: "SyncResultBuilder", @@ -1839,9 +1829,7 @@ async def _generate_sync_entry_for_presence( sync_result_builder.presence = presence async def _generate_sync_entry_for_rooms( - self, - sync_result_builder: "SyncResultBuilder", - account_data_by_room: Dict[str, Dict[str, JsonDict]], + self, sync_result_builder: "SyncResultBuilder" ) -> Tuple[AbstractSet[str], AbstractSet[str]]: """Generates the rooms portion of the sync response. Populates the `sync_result_builder` with the result. @@ -1852,7 +1840,6 @@ async def _generate_sync_entry_for_rooms( Args: sync_result_builder - account_data_by_room: Dictionary of per room account data Returns: Returns a 2-tuple describing rooms the user has joined or left. @@ -1865,9 +1852,30 @@ async def _generate_sync_entry_for_rooms( since_token = sync_result_builder.since_token user_id = sync_result_builder.sync_config.user.to_string() + blocks_all_rooms = ( + sync_result_builder.sync_config.filter_collection.blocks_all_rooms() + ) + + # 0. Start by fetching room account data (if required). 
+ if ( + blocks_all_rooms + or sync_result_builder.sync_config.filter_collection.blocks_all_room_account_data() + ): + account_data_by_room: Mapping[str, Mapping[str, JsonDict]] = {} + elif since_token and not sync_result_builder.full_state: + account_data_by_room = ( + await self.store.get_updated_room_account_data_for_user( + user_id, since_token.account_data_key + ) + ) + else: + account_data_by_room = await self.store.get_room_account_data_for_user( + user_id + ) + # 1. Start by fetching all ephemeral events in rooms we've joined (if required). block_all_room_ephemeral = ( - sync_result_builder.sync_config.filter_collection.blocks_all_rooms() + blocks_all_rooms or sync_result_builder.sync_config.filter_collection.blocks_all_room_ephemeral() ) if block_all_room_ephemeral: @@ -2294,7 +2302,7 @@ async def _generate_room_entry( room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], tags: Optional[Dict[str, Dict[str, Any]]], - account_data: Dict[str, JsonDict], + account_data: Mapping[str, JsonDict], always_include: bool = False, ) -> None: """Populates the `joined` and `archived` section of `sync_result_builder` diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index b9dca8ef3a34..0c0bf540b9ac 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -1192,7 +1192,8 @@ async def on_GET( if not await self._store.get_user_by_id(user_id): raise NotFoundError("User not found") - global_data, by_room_data = await self._store.get_account_data_for_user(user_id) + global_data = await self._store.get_global_account_data_for_user(user_id) + by_room_data = await self._store.get_room_account_data_for_user(user_id) return HTTPStatus.OK, { "account_data": { "global": global_data, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 8a359d7eb89c..2d6f02c14fe5 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -21,6 +21,7 @@ FrozenSet, Iterable, List, + Mapping, Optional, Tuple, cast, @@ -122,25 +123,25 @@ def get_max_account_data_stream_id(self) -> int: return self._account_data_id_gen.get_current_token() @cached() - async def get_account_data_for_user( + async def get_global_account_data_for_user( self, user_id: str - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: + ) -> Mapping[str, JsonDict]: """ - Get all the client account_data for a user. + Get all the global client account_data for a user. If experimental MSC3391 support is enabled, any entries with an empty content body are excluded; as this means they have been deleted. Args: user_id: The user to get the account_data for. + Returns: - A 2-tuple of a dict of global account_data and a dict mapping from - room_id string to per room account_data dicts. + The global account_data. """ - def get_account_data_for_user_txn( + def get_global_account_data_for_user( txn: LoggingTransaction, - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: + ) -> Dict[str, JsonDict]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. 
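For callers, the upshot of this file is that the old combined accessor becomes two independently cached methods, as the handler and admin API changes above already use. A minimal sketch of the resulting call pattern (illustrative only; store stands in for the data store):

from typing import Any, Mapping, Tuple

async def fetch_account_data(store: Any, user_id: str) -> Tuple[Mapping, Mapping]:
    # Two separate, separately cached queries: a caller that needs only one of
    # them no longer pays for the other.
    global_data = await store.get_global_account_data_for_user(user_id)
    by_room = await store.get_room_account_data_for_user(user_id)
    return global_data, by_room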
@@ -158,10 +159,34 @@ def get_account_data_for_user_txn( txn.execute(sql, (user_id,)) rows = self.db_pool.cursor_to_dict(txn) - global_account_data = { + return { row["account_data_type"]: db_to_json(row["content"]) for row in rows } + return await self.db_pool.runInteraction( + "get_global_account_data_for_user", get_global_account_data_for_user + ) + + @cached() + async def get_room_account_data_for_user( + self, user_id: str + ) -> Mapping[str, Mapping[str, JsonDict]]: + """ + Get all of the per-room client account_data for a user. + + If experimental MSC3391 support is enabled, any entries with an empty + content body are excluded; as this means they have been deleted. + + Args: + user_id: The user to get the account_data for. + + Returns: + A dict mapping from room_id string to per-room account_data dicts. + """ + + def get_room_account_data_for_user_txn( + txn: LoggingTransaction, + ) -> Dict[str, Dict[str, JsonDict]]: # The 'content != '{}' condition below prevents us from using # `simple_select_list_txn` here, as it doesn't support conditions # other than 'equals'. @@ -185,10 +210,10 @@ def get_account_data_for_user_txn( room_data[row["account_data_type"]] = db_to_json(row["content"]) - return global_account_data, by_room + return by_room return await self.db_pool.runInteraction( - "get_account_data_for_user", get_account_data_for_user_txn + "get_room_account_data_for_user_txn", get_room_account_data_for_user_txn ) @cached(num_args=2, max_entries=5000, tree=True) @@ -342,36 +367,61 @@ def get_updated_room_account_data_txn( "get_updated_room_account_data", get_updated_room_account_data_txn ) - async def get_updated_account_data_for_user( + async def get_updated_global_account_data_for_user( self, user_id: str, stream_id: int - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: - """Get all the client account_data for a that's changed for a user + ) -> Dict[str, JsonDict]: + """Get all the global account_data that's changed for a user. Args: user_id: The user to get the account_data for. stream_id: The point in the stream since which to get updates + Returns: - A deferred pair of a dict of global account_data and a dict - mapping from room_id string to per room account_data dicts. + A dict of global account_data. """ - def get_updated_account_data_for_user_txn( + def get_updated_global_account_data_for_user( txn: LoggingTransaction, - ) -> Tuple[Dict[str, JsonDict], Dict[str, Dict[str, JsonDict]]]: - sql = ( - "SELECT account_data_type, content FROM account_data" - " WHERE user_id = ? AND stream_id > ?" - ) - + ) -> Dict[str, JsonDict]: + sql = """ + SELECT account_data_type, content FROM account_data + WHERE user_id = ? AND stream_id > ? + """ txn.execute(sql, (user_id, stream_id)) - global_account_data = {row[0]: db_to_json(row[1]) for row in txn} + return {row[0]: db_to_json(row[1]) for row in txn} - sql = ( - "SELECT room_id, account_data_type, content FROM room_account_data" - " WHERE user_id = ? AND stream_id > ?" - ) + changed = self._account_data_stream_cache.has_entity_changed( + user_id, int(stream_id) + ) + if not changed: + return {} + + return await self.db_pool.runInteraction( + "get_updated_global_account_data_for_user", + get_updated_global_account_data_for_user, + ) + + async def get_updated_room_account_data_for_user( + self, user_id: str, stream_id: int + ) -> Dict[str, Dict[str, JsonDict]]: + """Get all the room account_data that's changed for a user. + Args: + user_id: The user to get the account_data for. 
+ stream_id: The point in the stream since which to get updates + + Returns: + A dict mapping from room_id string to per room account_data dicts. + """ + + def get_updated_room_account_data_for_user_txn( + txn: LoggingTransaction, + ) -> Dict[str, Dict[str, JsonDict]]: + sql = """ + SELECT room_id, account_data_type, content FROM room_account_data + WHERE user_id = ? AND stream_id > ? + """ txn.execute(sql, (user_id, stream_id)) account_data_by_room: Dict[str, Dict[str, JsonDict]] = {} @@ -379,16 +429,17 @@ def get_updated_account_data_for_user_txn( room_account_data = account_data_by_room.setdefault(row[0], {}) room_account_data[row[1]] = db_to_json(row[2]) - return global_account_data, account_data_by_room + return account_data_by_room changed = self._account_data_stream_cache.has_entity_changed( user_id, int(stream_id) ) if not changed: - return {}, {} + return {} return await self.db_pool.runInteraction( - "get_updated_account_data_for_user", get_updated_account_data_for_user_txn + "get_updated_room_account_data_for_user", + get_updated_room_account_data_for_user_txn, ) @cached(max_entries=5000, iterable=True) @@ -444,7 +495,8 @@ def process_replication_rows( self.get_global_account_data_by_type_for_user.invalidate( (row.user_id, row.data_type) ) - self.get_account_data_for_user.invalidate((row.user_id,)) + self.get_global_account_data_for_user.invalidate((row.user_id,)) + self.get_room_account_data_for_user.invalidate((row.user_id,)) self.get_account_data_for_room.invalidate((row.user_id, row.room_id)) self.get_account_data_for_room_and_type.invalidate( (row.user_id, row.room_id, row.data_type) @@ -492,7 +544,7 @@ async def add_account_data_to_room( ) self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) + self.get_room_account_data_for_user.invalidate((user_id,)) self.get_account_data_for_room.invalidate((user_id, room_id)) self.get_account_data_for_room_and_type.prefill( (user_id, room_id, account_data_type), content @@ -558,7 +610,7 @@ def _remove_account_data_for_room_txn( return None self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) + self.get_room_account_data_for_user.invalidate((user_id,)) self.get_account_data_for_room.invalidate((user_id, room_id)) self.get_account_data_for_room_and_type.prefill( (user_id, room_id, account_data_type), {} @@ -593,7 +645,7 @@ async def add_account_data_for_user( ) self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) + self.get_global_account_data_for_user.invalidate((user_id,)) self.get_global_account_data_by_type_for_user.invalidate( (user_id, account_data_type) ) @@ -761,7 +813,7 @@ def _remove_account_data_for_user_txn( return None self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_account_data_for_user.invalidate((user_id,)) + self.get_global_account_data_for_user.invalidate((user_id,)) self.get_global_account_data_by_type_for_user.prefill( (user_id, account_data_type), {} ) @@ -822,7 +874,10 @@ def _purge_account_data_for_user_txn( txn, self.get_account_data_for_room_and_type, (user_id,) ) self._invalidate_cache_and_stream( - txn, self.get_account_data_for_user, (user_id,) + txn, self.get_global_account_data_for_user, (user_id,) + ) + self._invalidate_cache_and_stream( + txn, self.get_room_account_data_for_user, (user_id,) ) self._invalidate_cache_and_stream( txn, 
self.get_global_account_data_by_type_for_user, (user_id,) From 14be78d492fc31e743e9e5855ddb8b4c9520985a Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 10 Feb 2023 12:37:07 -0500 Subject: [PATCH 171/344] Support for MSC3758: exact_event_match push condition (#14964) This specifies to search for an exact value match, instead of string globbing. It only works across non-compound JSON values (null, boolean, integer, and strings). --- changelog.d/14964.feature | 1 + rust/benches/evaluator.rs | 65 ++++++++-- rust/src/push/evaluator.rs | 69 ++++++++--- rust/src/push/mod.rs | 83 +++++++++++++ stubs/synapse/synapse_rust/push.pyi | 7 +- synapse/config/experimental.py | 5 + synapse/push/bulk_push_rule_evaluator.py | 18 +-- synapse/types/__init__.py | 2 + tests/push/test_push_rule_evaluator.py | 147 ++++++++++++++++++++++- 9 files changed, 356 insertions(+), 41 deletions(-) create mode 100644 changelog.d/14964.feature diff --git a/changelog.d/14964.feature b/changelog.d/14964.feature new file mode 100644 index 000000000000..13c0bc193b8e --- /dev/null +++ b/changelog.d/14964.feature @@ -0,0 +1 @@ +Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 35f7a50bcea0..229553ebf8fe 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -16,6 +16,7 @@ use std::collections::BTreeSet; use synapse::push::{ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules, + SimpleJsonValue, }; use test::Bencher; @@ -24,9 +25,18 @@ extern crate test; #[bench] fn bench_match_exact(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + SimpleJsonValue::Str("m.text".to_string()), + ), + ( + "room_id".to_string(), + SimpleJsonValue::Str("!room:server".to_string()), + ), + ( + "content.body".to_string(), + SimpleJsonValue::Str("test message".to_string()), + ), ] .into_iter() .collect(); @@ -43,6 +53,7 @@ fn bench_match_exact(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -63,9 +74,18 @@ fn bench_match_exact(b: &mut Bencher) { #[bench] fn bench_match_word(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + SimpleJsonValue::Str("m.text".to_string()), + ), + ( + "room_id".to_string(), + SimpleJsonValue::Str("!room:server".to_string()), + ), + ( + "content.body".to_string(), + SimpleJsonValue::Str("test message".to_string()), + ), ] .into_iter() .collect(); @@ -82,6 +102,7 @@ fn bench_match_word(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -102,9 +123,18 @@ fn bench_match_word(b: &mut Bencher) { #[bench] fn bench_match_word_miss(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + SimpleJsonValue::Str("m.text".to_string()), + ), + ( + "room_id".to_string(), + SimpleJsonValue::Str("!room:server".to_string()), + ), + ( + "content.body".to_string(), + SimpleJsonValue::Str("test message".to_string()), + ), ] .into_iter() .collect(); @@ -121,6 +151,7 
@@ fn bench_match_word_miss(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -141,9 +172,18 @@ fn bench_match_word_miss(b: &mut Bencher) { #[bench] fn bench_eval_message(b: &mut Bencher) { let flattened_keys = [ - ("type".to_string(), "m.text".to_string()), - ("room_id".to_string(), "!room:server".to_string()), - ("content.body".to_string(), "test message".to_string()), + ( + "type".to_string(), + SimpleJsonValue::Str("m.text".to_string()), + ), + ( + "room_id".to_string(), + SimpleJsonValue::Str("!room:server".to_string()), + ), + ( + "content.body".to_string(), + SimpleJsonValue::Str("test message".to_string()), + ), ] .into_iter() .collect(); @@ -160,6 +200,7 @@ fn bench_eval_message(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index ec7a8c44530f..dd6b4343ec2c 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -22,8 +22,8 @@ use regex::Regex; use super::{ utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType}, - Action, Condition, EventMatchCondition, FilteredPushRules, KnownCondition, - RelatedEventMatchCondition, + Action, Condition, EventMatchCondition, ExactEventMatchCondition, FilteredPushRules, + KnownCondition, RelatedEventMatchCondition, SimpleJsonValue, }; lazy_static! { @@ -61,9 +61,9 @@ impl RoomVersionFeatures { /// Allows running a set of push rules against a particular event. #[pyclass] pub struct PushRuleEvaluator { - /// A mapping of "flattened" keys to string values in the event, e.g. + /// A mapping of "flattened" keys to simple JSON values in the event, e.g. /// includes things like "type" and "content.msgtype". - flattened_keys: BTreeMap, + flattened_keys: BTreeMap, /// The "content.body", if any. body: String, @@ -87,7 +87,7 @@ pub struct PushRuleEvaluator { /// The related events, indexed by relation type. Flattened in the same manner as /// `flattened_keys`. - related_events_flattened: BTreeMap>, + related_events_flattened: BTreeMap>, /// If msc3664, push rules for related events, is enabled. related_event_match_enabled: bool, @@ -98,6 +98,9 @@ pub struct PushRuleEvaluator { /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same /// flag as MSC1767 (extensible events core). msc3931_enabled: bool, + + /// If MSC3758 (exact_event_match push rule condition) is enabled. + msc3758_exact_event_match: bool, } #[pymethods] @@ -106,22 +109,23 @@ impl PushRuleEvaluator { #[allow(clippy::too_many_arguments)] #[new] pub fn py_new( - flattened_keys: BTreeMap, + flattened_keys: BTreeMap, has_mentions: bool, user_mentions: BTreeSet, room_mention: bool, room_member_count: u64, sender_power_level: Option, notification_power_levels: BTreeMap, - related_events_flattened: BTreeMap>, + related_events_flattened: BTreeMap>, related_event_match_enabled: bool, room_version_feature_flags: Vec, msc3931_enabled: bool, + msc3758_exact_event_match: bool, ) -> Result { - let body = flattened_keys - .get("content.body") - .cloned() - .unwrap_or_default(); + let body = match flattened_keys.get("content.body") { + Some(SimpleJsonValue::Str(s)) => s.clone(), + _ => String::new(), + }; Ok(PushRuleEvaluator { flattened_keys, @@ -136,6 +140,7 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, + msc3758_exact_event_match, }) } @@ -252,6 +257,9 @@ impl PushRuleEvaluator { KnownCondition::EventMatch(event_match) => { self.match_event_match(event_match, user_id)? 
} + KnownCondition::ExactEventMatch(exact_event_match) => { + self.match_exact_event_match(exact_event_match)? + } KnownCondition::RelatedEventMatch(event_match) => { self.match_related_event_match(event_match, user_id)? } @@ -337,7 +345,9 @@ impl PushRuleEvaluator { return Ok(false); }; - let haystack = if let Some(haystack) = self.flattened_keys.get(&*event_match.key) { + let haystack = if let Some(SimpleJsonValue::Str(haystack)) = + self.flattened_keys.get(&*event_match.key) + { haystack } else { return Ok(false); @@ -355,6 +365,27 @@ impl PushRuleEvaluator { compiled_pattern.is_match(haystack) } + /// Evaluates a `exact_event_match` condition. (MSC3758) + fn match_exact_event_match( + &self, + exact_event_match: &ExactEventMatchCondition, + ) -> Result { + // First check if the feature is enabled. + if !self.msc3758_exact_event_match { + return Ok(false); + } + + let value = &exact_event_match.value; + + let haystack = if let Some(haystack) = self.flattened_keys.get(&*exact_event_match.key) { + haystack + } else { + return Ok(false); + }; + + Ok(haystack == &**value) + } + /// Evaluates a `related_event_match` condition. (MSC3664) fn match_related_event_match( &self, @@ -410,7 +441,7 @@ impl PushRuleEvaluator { return Ok(false); }; - let haystack = if let Some(haystack) = event.get(&**key) { + let haystack = if let Some(SimpleJsonValue::Str(haystack)) = event.get(&**key) { haystack } else { return Ok(false); @@ -455,7 +486,10 @@ impl PushRuleEvaluator { #[test] fn push_rule_evaluator() { let mut flattened_keys = BTreeMap::new(); - flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); + flattened_keys.insert( + "content.body".to_string(), + SimpleJsonValue::Str("foo bar bob hello".to_string()), + ); let evaluator = PushRuleEvaluator::py_new( flattened_keys, false, @@ -468,6 +502,7 @@ fn push_rule_evaluator() { true, vec![], true, + true, ) .unwrap(); @@ -482,7 +517,10 @@ fn test_requires_room_version_supports_condition() { use crate::push::{PushRule, PushRules}; let mut flattened_keys = BTreeMap::new(); - flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); + flattened_keys.insert( + "content.body".to_string(), + SimpleJsonValue::Str("foo bar bob hello".to_string()), + ); let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; let evaluator = PushRuleEvaluator::py_new( flattened_keys, @@ -496,6 +534,7 @@ fn test_requires_room_version_supports_condition() { false, flags, true, + true, ) .unwrap(); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 3c4f876cabf5..79e519fe111d 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -56,7 +56,9 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use anyhow::{Context, Error}; use log::warn; +use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; +use pyo3::types::{PyBool, PyLong, PyString}; use pythonize::{depythonize, pythonize}; use serde::de::Error as _; use serde::{Deserialize, Serialize}; @@ -248,6 +250,36 @@ impl<'de> Deserialize<'de> for Action { } } +/// A simple JSON values (string, int, boolean, or null). +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(untagged)] +pub enum SimpleJsonValue { + Str(String), + Int(i64), + Bool(bool), + Null, +} + +impl<'source> FromPyObject<'source> for SimpleJsonValue { + fn extract(ob: &'source PyAny) -> PyResult { + if let Ok(s) = ::try_from(ob) { + Ok(SimpleJsonValue::Str(s.to_string())) + // A bool *is* an int, ensure we try bool first. 
+ } else if let Ok(b) = ::try_from(ob) { + Ok(SimpleJsonValue::Bool(b.extract()?)) + } else if let Ok(i) = ::try_from(ob) { + Ok(SimpleJsonValue::Int(i.extract()?)) + } else if ob.is_none() { + Ok(SimpleJsonValue::Null) + } else { + Err(PyTypeError::new_err(format!( + "Can't convert from {} to SimpleJsonValue", + ob.get_type().name()? + ))) + } + } +} + /// A condition used in push rules to match against an event. /// /// We need this split as `serde` doesn't give us the ability to have a @@ -267,6 +299,8 @@ pub enum Condition { #[serde(tag = "kind")] pub enum KnownCondition { EventMatch(EventMatchCondition), + #[serde(rename = "com.beeper.msc3758.exact_event_match")] + ExactEventMatch(ExactEventMatchCondition), #[serde(rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatch(RelatedEventMatchCondition), #[serde(rename = "org.matrix.msc3952.is_user_mention")] @@ -309,6 +343,13 @@ pub struct EventMatchCondition { pub pattern_type: Option>, } +/// The body of a [`Condition::ExactEventMatch`] +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ExactEventMatchCondition { + pub key: Cow<'static, str>, + pub value: Cow<'static, SimpleJsonValue>, +} + /// The body of a [`Condition::RelatedEventMatch`] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RelatedEventMatchCondition { @@ -542,6 +583,48 @@ fn test_deserialize_unstable_msc3931_condition() { )); } +#[test] +fn test_deserialize_unstable_msc3758_condition() { + // A string condition should work. + let json = + r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":"foo"}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::ExactEventMatch(_)) + )); + + // A boolean condition should work. + let json = + r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":true}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::ExactEventMatch(_)) + )); + + // An integer condition should work. + let json = r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":1}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::ExactEventMatch(_)) + )); + + // A null condition should work + let json = + r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":null}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::ExactEventMatch(_)) + )); +} + #[test] fn test_deserialize_unstable_msc3952_user_condition() { let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#; diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 754acab2f978..328f681a29fc 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -14,7 +14,7 @@ from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union -from synapse.types import JsonDict +from synapse.types import JsonDict, SimpleJsonValue class PushRule: @property @@ -56,17 +56,18 @@ def get_base_rule_ids() -> Collection[str]: ... 
class PushRuleEvaluator: def __init__( self, - flattened_keys: Mapping[str, str], + flattened_keys: Mapping[str, SimpleJsonValue], has_mentions: bool, user_mentions: Set[str], room_mention: bool, room_member_count: int, sender_power_level: Optional[int], notification_power_levels: Mapping[str, int], - related_events_flattened: Mapping[str, Mapping[str, str]], + related_events_flattened: Mapping[str, Mapping[str, SimpleJsonValue]], related_event_match_enabled: bool, room_version_feature_flags: Tuple[str, ...], msc3931_enabled: bool, + msc3758_exact_event_match: bool, ): ... def run( self, diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 5e3a889081b0..6ac2f0c10d52 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -169,6 +169,11 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3925: do not replace events with their edits self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False) + # MSC3758: exact_event_match push rule condition + self.msc3758_exact_event_match = experimental.get( + "msc3758_exact_event_match", False + ) + # MSC3873: Disambiguate event_match keys. self.msc3783_escape_event_match_key = experimental.get( "msc3783_escape_event_match_key", False diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 39d2f88f0349..8568aca52898 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -43,6 +43,7 @@ from synapse.state import POWER_KEY from synapse.storage.databases.main.roommember import EventIdMembership from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator +from synapse.types import SimpleJsonValue from synapse.types.state import StateFilter from synapse.util.caches import register_cache from synapse.util.metrics import measure_func @@ -256,13 +257,15 @@ async def _get_power_levels_and_sender_level( return pl_event.content if pl_event else {}, sender_level - async def _related_events(self, event: EventBase) -> Dict[str, Dict[str, str]]: + async def _related_events( + self, event: EventBase + ) -> Dict[str, Dict[str, SimpleJsonValue]]: """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation Returns: Mapping of relation type to flattened events. """ - related_events: Dict[str, Dict[str, str]] = {} + related_events: Dict[str, Dict[str, SimpleJsonValue]] = {} if self._related_event_match_enabled: related_event_id = event.content.get("m.relates_to", {}).get("event_id") relation_type = event.content.get("m.relates_to", {}).get("rel_type") @@ -425,6 +428,7 @@ async def _action_for_event_by_user( self._related_event_match_enabled, event.room_version.msc3931_push_features, self.hs.config.experimental.msc1767_enabled, # MSC3931 flag + self.hs.config.experimental.msc3758_exact_event_match, ) users = rules_by_user.keys() @@ -501,15 +505,15 @@ async def _action_for_event_by_user( def _flatten_dict( d: Union[EventBase, Mapping[str, Any]], prefix: Optional[List[str]] = None, - result: Optional[Dict[str, str]] = None, + result: Optional[Dict[str, SimpleJsonValue]] = None, *, msc3783_escape_event_match_key: bool = False, -) -> Dict[str, str]: +) -> Dict[str, SimpleJsonValue]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, flatten it into a single layer dictionary by combining the keys & sub-keys. - Any (non-dictionary), non-string value is dropped. 
+ String, integer, boolean, and null values are kept. All others are dropped. Transforms: @@ -538,8 +542,8 @@ def _flatten_dict( # nested fields. key = key.replace("\\", "\\\\").replace(".", "\\.") - if isinstance(value, str): - result[".".join(prefix + [key])] = value.lower() + if isinstance(value, (bool, str)) or type(value) is int or value is None: + result[".".join(prefix + [key])] = value elif isinstance(value, Mapping): # do not set `room_version` due to recursion considerations below _flatten_dict( diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index f82d1cfc298b..52e366c8ae71 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -69,6 +69,8 @@ MutableStateMap = MutableMapping[StateKey, T] # JSON types. These could be made stronger, but will do for now. +# A "simple" (canonical) JSON value. +SimpleJsonValue = Optional[Union[str, int, bool]] # A JSON-serialisable dict. JsonDict = Dict[str, Any] # A JSON-serialisable mapping; roughly speaking an immutable JSONDict. diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 516b65cc3c97..660344734175 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -57,7 +57,7 @@ def test_nested(self) -> None: ) def test_non_string(self) -> None: - """Non-string items are dropped.""" + """Booleans, ints, and nulls should be kept while other items are dropped.""" input: Dict[str, Any] = { "woo": "woo", "foo": True, @@ -66,7 +66,9 @@ def test_non_string(self) -> None: "fuzz": [], "boo": {}, } - self.assertEqual({"woo": "woo"}, _flatten_dict(input)) + self.assertEqual( + {"woo": "woo", "foo": True, "bar": 1, "baz": None}, _flatten_dict(input) + ) def test_event(self) -> None: """Events can also be flattened.""" @@ -86,9 +88,9 @@ def test_event(self) -> None: ) expected = { "content.msgtype": "m.text", - "content.body": "hello world!", + "content.body": "Hello world!", "content.format": "org.matrix.custom.html", - "content.formatted_body": "

<h1>hello world!</h1>",
+            "content.formatted_body": "<h1>Hello world!</h1>
", "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", @@ -166,6 +168,7 @@ def _get_evaluator( related_event_match_enabled=True, room_version_feature_flags=event.room_version.msc3931_push_features, msc3931_enabled=True, + msc3758_exact_event_match=True, ) def test_display_name(self) -> None: @@ -410,6 +413,142 @@ def test_event_match_non_body(self) -> None: "pattern should not match before a newline", ) + def test_exact_event_match_string(self) -> None: + """Check that exact_event_match conditions work as expected for strings.""" + + # Test against a string value. + condition = { + "kind": "com.beeper.msc3758.exact_event_match", + "key": "content.value", + "value": "foobaz", + } + self._assert_matches( + condition, + {"value": "foobaz"}, + "exact value should match", + ) + self._assert_not_matches( + condition, + {"value": "FoobaZ"}, + "values should match and be case-sensitive", + ) + self._assert_not_matches( + condition, + {"value": "test foobaz test"}, + "values must exactly match", + ) + value: Any + for value in (True, False, 1, 1.1, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + # it should work on frozendicts too + self._assert_matches( + condition, + frozendict.frozendict({"value": "foobaz"}), + "values should match on frozendicts", + ) + + def test_exact_event_match_boolean(self) -> None: + """Check that exact_event_match conditions work as expected for booleans.""" + + # Test against a True boolean value. + condition = { + "kind": "com.beeper.msc3758.exact_event_match", + "key": "content.value", + "value": True, + } + self._assert_matches( + condition, + {"value": True}, + "exact value should match", + ) + self._assert_not_matches( + condition, + {"value": False}, + "incorrect values should not match", + ) + for value in ("foobaz", 1, 1.1, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + # Test against a False boolean value. + condition = { + "kind": "com.beeper.msc3758.exact_event_match", + "key": "content.value", + "value": False, + } + self._assert_matches( + condition, + {"value": False}, + "exact value should match", + ) + self._assert_not_matches( + condition, + {"value": True}, + "incorrect values should not match", + ) + # Choose false-y values to ensure there's no type coercion. 
+ for value in ("", 0, 1.1, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + def test_exact_event_match_null(self) -> None: + """Check that exact_event_match conditions work as expected for null.""" + + condition = { + "kind": "com.beeper.msc3758.exact_event_match", + "key": "content.value", + "value": None, + } + self._assert_matches( + condition, + {"value": None}, + "exact value should match", + ) + for value in ("foobaz", True, False, 1, 1.1, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + + def test_exact_event_match_integer(self) -> None: + """Check that exact_event_match conditions work as expected for integers.""" + + condition = { + "kind": "com.beeper.msc3758.exact_event_match", + "key": "content.value", + "value": 1, + } + self._assert_matches( + condition, + {"value": 1}, + "exact value should match", + ) + value: Any + for value in (1.1, -1, 0): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect values should not match", + ) + for value in ("1", True, False, None, [], {}): + self._assert_not_matches( + condition, + {"value": value}, + "incorrect types should not match", + ) + def test_no_body(self) -> None: """Not having a body shouldn't break the evaluator.""" evaluator = self._get_evaluator({}) From d0c713cc85f094c323b2ba3f02d8ac411a7f0705 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 10 Feb 2023 23:29:00 +0000 Subject: [PATCH 172/344] Return read-only collections from `@cached` methods (#13755) It's important that collections returned from `@cached` methods are not modified, otherwise future retrievals from the cache will return the modified collection. This applies to the return values from `@cached` methods and the values inside the dictionaries returned by `@cachedList` methods. It's not necessary for the dictionaries returned by `@cachedList` methods themselves to be read-only. 
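To make the hazard concrete, here is a minimal, self-contained sketch; a plain
dictionary stands in for Synapse's `@cached` descriptor, and the room and alias
values are invented for the example:

    from typing import Dict, List

    _cache: Dict[str, List[str]] = {}

    def get_aliases_for_room(room_id: str) -> List[str]:
        # Stand-in for a @cached method: every caller after the first
        # receives the *same* list object from the cache.
        if room_id not in _cache:
            _cache[room_id] = ["#general:example.org"]  # pretend DB fetch
        return _cache[room_id]

    aliases = get_aliases_for_room("!room:example.org")
    aliases.append("#announcements:example.org")  # silently mutates the cached entry
    # A later, unrelated caller now sees the extra alias:
    assert get_aliases_for_room("!room:example.org") == [
        "#general:example.org",
        "#announcements:example.org",
    ]
    # Re-typing such return values as read-only (Sequence/Mapping) surfaces the
    # mutation as a type error and pushes callers to copy first, e.g.
    # room_aliases = list(room_aliases) + [canonical_alias]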
Signed-off-by: Sean Quah Co-authored-by: David Robertson --- changelog.d/13755.misc | 1 + synapse/app/phone_stats_home.py | 4 +-- synapse/config/room_directory.py | 6 ++--- synapse/events/builder.py | 6 ++--- synapse/federation/federation_server.py | 3 ++- synapse/handlers/directory.py | 6 ++--- synapse/handlers/receipts.py | 4 +-- synapse/handlers/room.py | 2 +- synapse/handlers/sync.py | 4 +-- synapse/push/bulk_push_rule_evaluator.py | 4 +-- synapse/state/__init__.py | 2 +- synapse/storage/controllers/state.py | 6 ++--- .../storage/databases/main/account_data.py | 2 +- synapse/storage/databases/main/appservice.py | 2 +- synapse/storage/databases/main/devices.py | 17 ++++++++----- synapse/storage/databases/main/directory.py | 4 +-- .../storage/databases/main/end_to_end_keys.py | 25 +++++++++++-------- .../databases/main/event_federation.py | 11 +++++--- .../databases/main/monthly_active_users.py | 4 +-- synapse/storage/databases/main/receipts.py | 10 +++++--- .../storage/databases/main/registration.py | 4 +-- synapse/storage/databases/main/relations.py | 7 ++++-- synapse/storage/databases/main/roommember.py | 19 +++++++------- synapse/storage/databases/main/signatures.py | 6 ++--- synapse/storage/databases/main/tags.py | 8 +++--- .../storage/databases/main/user_directory.py | 4 +-- tests/rest/admin/test_server_notice.py | 4 +-- 27 files changed, 98 insertions(+), 77 deletions(-) create mode 100644 changelog.d/13755.misc diff --git a/changelog.d/13755.misc b/changelog.d/13755.misc new file mode 100644 index 000000000000..662ee00e99d5 --- /dev/null +++ b/changelog.d/13755.misc @@ -0,0 +1 @@ +Re-type hint some collections as read-only. diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 53db1e85b3d8..897dd3edac3e 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -15,7 +15,7 @@ import math import resource import sys -from typing import TYPE_CHECKING, List, Sized, Tuple +from typing import TYPE_CHECKING, List, Mapping, Sized, Tuple from prometheus_client import Gauge @@ -194,7 +194,7 @@ def performance_stats_init() -> None: @wrap_as_background_process("generate_monthly_active_users") async def generate_monthly_active_users() -> None: current_mau_count = 0 - current_mau_count_by_service = {} + current_mau_count_by_service: Mapping[str, int] = {} reserved_users: Sized = () store = hs.get_datastores().main if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 3ed236217fd4..8666c22f010d 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, List +from typing import Any, Collection from matrix_common.regex import glob_to_regex @@ -70,7 +70,7 @@ def is_alias_creation_allowed(self, user_id: str, room_id: str, alias: str) -> b return False def is_publishing_room_allowed( - self, user_id: str, room_id: str, aliases: List[str] + self, user_id: str, room_id: str, aliases: Collection[str] ) -> bool: """Checks if the given user is allowed to publish the room @@ -122,7 +122,7 @@ def __init__(self, option_name: str, rule: JsonDict): except Exception as e: raise ConfigError("Failed to parse glob into regex") from e - def matches(self, user_id: str, room_id: str, aliases: List[str]) -> bool: + def matches(self, user_id: str, room_id: str, aliases: Collection[str]) -> bool: """Tests if this rule matches the given user_id, room_id and aliases. Args: diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 94dd1298e177..c82745275f94 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any, Collection, Dict, List, Optional, Tuple, Union import attr from signedjson.types import SigningKey @@ -103,7 +103,7 @@ def is_state(self) -> bool: async def build( self, - prev_event_ids: List[str], + prev_event_ids: Collection[str], auth_event_ids: Optional[List[str]], depth: Optional[int] = None, ) -> EventBase: @@ -136,7 +136,7 @@ async def build( format_version = self.room_version.event_format # The types of auth/prev events changes between event versions. - prev_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] + prev_events: Union[Collection[str], List[Tuple[str, Dict[str, str]]]] auth_events: Union[List[str], List[Tuple[str, Dict[str, str]]]] if format_version == EventFormatVersions.ROOM_V1_V2: auth_events = await self._store.add_event_hashes(auth_event_ids) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 8d36172484d6..6addc0bb6521 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -23,6 +23,7 @@ Collection, Dict, List, + Mapping, Optional, Tuple, Union, @@ -1512,7 +1513,7 @@ async def on_query(self, query_type: str, args: dict) -> JsonDict: def _get_event_ids_for_partial_state_join( join_event: EventBase, prev_state_ids: StateMap[str], - summary: Dict[str, MemberSummary], + summary: Mapping[str, MemberSummary], ) -> Collection[str]: """Calculate state to be returned in a partial_state send_join diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index d31b0fbb1745..a5798e9483ce 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -14,7 +14,7 @@ import logging import string -from typing import TYPE_CHECKING, Iterable, List, Optional +from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence from typing_extensions import Literal @@ -486,7 +486,7 @@ async def edit_published_room_list( ) if canonical_alias: # Ensure we do not mutate room_aliases. 
- room_aliases = room_aliases + [canonical_alias] + room_aliases = list(room_aliases) + [canonical_alias] if not self.config.roomdirectory.is_publishing_room_allowed( user_id, room_id, room_aliases @@ -529,7 +529,7 @@ async def edit_published_appservice_room_list( async def get_aliases_for_room( self, requester: Requester, room_id: str - ) -> List[str]: + ) -> Sequence[str]: """ Get a list of the aliases that currently point to this room on this server """ diff --git a/synapse/handlers/receipts.py b/synapse/handlers/receipts.py index 04c61ae3ddbd..2bacdebfb5f9 100644 --- a/synapse/handlers/receipts.py +++ b/synapse/handlers/receipts.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple +from typing import TYPE_CHECKING, Iterable, List, Optional, Sequence, Tuple from synapse.api.constants import EduTypes, ReceiptTypes from synapse.appservice import ApplicationService @@ -189,7 +189,7 @@ def __init__(self, hs: "HomeServer"): @staticmethod def filter_out_private_receipts( - rooms: List[JsonDict], user_id: str + rooms: Sequence[JsonDict], user_id: str ) -> List[JsonDict]: """ Filters a list of serialized receipts (as returned by /sync and /initialSync) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 0e759b8a5d7a..060bbcb18177 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1928,6 +1928,6 @@ async def shutdown_room( return { "kicked_users": kicked_users, "failed_to_kick_users": failed_to_kick_users, - "local_aliases": aliases_for_room, + "local_aliases": list(aliases_for_room), "new_room_id": new_room_id, } diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 399685e5b7d5..4bae46158acf 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1519,7 +1519,7 @@ async def generate_sync_result( one_time_keys_count = await self.store.count_e2e_one_time_keys( user_id, device_id ) - unused_fallback_key_types = ( + unused_fallback_key_types = list( await self.store.get_e2e_unused_fallback_key_types(user_id, device_id) ) @@ -2301,7 +2301,7 @@ async def _generate_room_entry( sync_result_builder: "SyncResultBuilder", room_builder: "RoomSyncResultBuilder", ephemeral: List[JsonDict], - tags: Optional[Dict[str, Dict[str, Any]]], + tags: Optional[Mapping[str, Mapping[str, Any]]], account_data: Mapping[str, JsonDict], always_include: bool = False, ) -> None: diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8568aca52898..f6a5bffb0f6b 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -22,6 +22,7 @@ List, Mapping, Optional, + Sequence, Set, Tuple, Union, @@ -149,7 +150,7 @@ async def _get_rules_for_event( # little, we can skip fetching a huge number of push rules in large rooms. # This helps make joins and leaves faster. if event.type == EventTypes.Member: - local_users = [] + local_users: Sequence[str] = [] # We never notify a user about their own actions. This is enforced in # `_action_for_event_by_user` in the loop over `rules_by_user`, but we # do the same check here to avoid unnecessary DB queries. 
@@ -184,7 +185,6 @@ async def _get_rules_for_event( if event.type == EventTypes.Member and event.membership == Membership.INVITE: invited = event.state_key if invited and self.hs.is_mine_id(invited) and invited not in local_users: - local_users = list(local_users) local_users.append(invited) if not local_users: diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index e877e6f1a12b..4dc25df67e17 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -226,7 +226,7 @@ async def compute_state_after_events( return await ret.get_state(self._state_storage_controller, state_filter) async def get_current_user_ids_in_room( - self, room_id: str, latest_event_ids: List[str] + self, room_id: str, latest_event_ids: Collection[str] ) -> Set[str]: """ Get the users IDs who are currently in a room. diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 52efd4a1719b..9d7a8a792f71 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -14,6 +14,7 @@ import logging from typing import ( TYPE_CHECKING, + AbstractSet, Any, Awaitable, Callable, @@ -23,7 +24,6 @@ List, Mapping, Optional, - Set, Tuple, ) @@ -527,7 +527,7 @@ async def get_current_state_event( ) return state_map.get(key) - async def get_current_hosts_in_room(self, room_id: str) -> Set[str]: + async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]: """Get current hosts in room based on current state. Blocks until we have full state for the given room. This only happens for rooms @@ -584,7 +584,7 @@ async def get_current_hosts_in_room_or_partial_state_approximation( async def get_users_in_room_with_profiles( self, room_id: str - ) -> Dict[str, ProfileInfo]: + ) -> Mapping[str, ProfileInfo]: """ Get the current users in the room with their profiles. If the room is currently partial-stated, this will block until the room has diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 2d6f02c14fe5..95567826f27a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -240,7 +240,7 @@ async def get_global_account_data_by_type_for_user( @cached(num_args=2, tree=True) async def get_account_data_for_room( self, user_id: str, room_id: str - ) -> Dict[str, JsonDict]: + ) -> Mapping[str, JsonDict]: """Get all the client account_data for a user for a room. Args: diff --git a/synapse/storage/databases/main/appservice.py b/synapse/storage/databases/main/appservice.py index 5fb152c4ff07..484db175d090 100644 --- a/synapse/storage/databases/main/appservice.py +++ b/synapse/storage/databases/main/appservice.py @@ -166,7 +166,7 @@ async def get_app_service_users_in_room( room_id: str, app_service: "ApplicationService", cache_context: _CacheContext, - ) -> List[str]: + ) -> Sequence[str]: """ Get all users in a room that the appservice controls. 
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 85c1778a81ce..1ca66d57d40c 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -21,6 +21,7 @@ Dict, Iterable, List, + Mapping, Optional, Set, Tuple, @@ -202,7 +203,9 @@ def _invalidate_caches_for_devices( def get_device_stream_token(self) -> int: return self._device_list_id_gen.get_current_token() - async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> int: + async def count_devices_by_users( + self, user_ids: Optional[Collection[str]] = None + ) -> int: """Retrieve number of all devices of given users. Only returns number of devices that are not marked as hidden. @@ -213,7 +216,7 @@ async def count_devices_by_users(self, user_ids: Optional[List[str]] = None) -> """ def count_devices_by_users_txn( - txn: LoggingTransaction, user_ids: List[str] + txn: LoggingTransaction, user_ids: Collection[str] ) -> int: sql = """ SELECT count(*) @@ -747,7 +750,7 @@ def _add_user_signature_change_txn( @cancellable async def get_user_devices_from_cache( self, user_ids: Set[str], user_and_device_ids: List[Tuple[str, str]] - ) -> Tuple[Set[str], Dict[str, Dict[str, JsonDict]]]: + ) -> Tuple[Set[str], Dict[str, Mapping[str, JsonDict]]]: """Get the devices (and keys if any) for remote users from the cache. Args: @@ -775,16 +778,18 @@ async def get_user_devices_from_cache( user_ids_not_in_cache = unique_user_ids - user_ids_in_cache # First fetch all the users which all devices are to be returned. - results: Dict[str, Dict[str, JsonDict]] = {} + results: Dict[str, Mapping[str, JsonDict]] = {} for user_id in user_ids: if user_id in user_ids_in_cache: results[user_id] = await self.get_cached_devices_for_user(user_id) # Then fetch all device-specific requests, but skip users we've already # fetched all devices for. + device_specific_results: Dict[str, Dict[str, JsonDict]] = {} for user_id, device_id in user_and_device_ids: if user_id in user_ids_in_cache and user_id not in user_ids: device = await self._get_cached_user_device(user_id, device_id) - results.setdefault(user_id, {})[device_id] = device + device_specific_results.setdefault(user_id, {})[device_id] = device + results.update(device_specific_results) set_tag("in_cache", str(results)) set_tag("not_in_cache", str(user_ids_not_in_cache)) @@ -802,7 +807,7 @@ async def _get_cached_user_device(self, user_id: str, device_id: str) -> JsonDic return db_to_json(content) @cached() - async def get_cached_devices_for_user(self, user_id: str) -> Dict[str, JsonDict]: + async def get_cached_devices_for_user(self, user_id: str) -> Mapping[str, JsonDict]: devices = await self.db_pool.simple_select_list( table="device_lists_remote_cache", keyvalues={"user_id": user_id}, diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 5903fdaf007a..44aa181174ac 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Iterable, List, Optional, Tuple +from typing import Iterable, List, Optional, Sequence, Tuple import attr @@ -74,7 +74,7 @@ async def get_room_alias_creator(self, room_alias: str) -> str: ) @cached(max_entries=5000) - async def get_aliases_for_room(self, room_id: str) -> List[str]: + async def get_aliases_for_room(self, room_id: str) -> Sequence[str]: return await self.db_pool.simple_select_onecol( "room_aliases", {"room_id": room_id}, diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index c4ac6c33ba54..752dc16e1773 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -20,7 +20,9 @@ Dict, Iterable, List, + Mapping, Optional, + Sequence, Tuple, Union, cast, @@ -691,7 +693,7 @@ def _set_e2e_fallback_keys_txn( @cached(max_entries=10000) async def get_e2e_unused_fallback_key_types( self, user_id: str, device_id: str - ) -> List[str]: + ) -> Sequence[str]: """Returns the fallback key types that have an unused key. Args: @@ -731,7 +733,7 @@ async def get_e2e_cross_signing_key( return user_keys.get(key_type) @cached(num_args=1) - def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Dict[str, JsonDict]: + def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Mapping[str, JsonDict]: """Dummy function. Only used to make a cache for _get_bare_e2e_cross_signing_keys_bulk. """ @@ -744,7 +746,7 @@ def _get_bare_e2e_cross_signing_keys(self, user_id: str) -> Dict[str, JsonDict]: ) async def _get_bare_e2e_cross_signing_keys_bulk( self, user_ids: Iterable[str] - ) -> Dict[str, Optional[Dict[str, JsonDict]]]: + ) -> Dict[str, Optional[Mapping[str, JsonDict]]]: """Returns the cross-signing keys for a set of users. The output of this function should be passed to _get_e2e_cross_signing_signatures_txn if the signatures for the calling user need to be fetched. @@ -765,7 +767,7 @@ async def _get_bare_e2e_cross_signing_keys_bulk( ) # The `Optional` comes from the `@cachedList` decorator. - return cast(Dict[str, Optional[Dict[str, JsonDict]]], result) + return cast(Dict[str, Optional[Mapping[str, JsonDict]]], result) def _get_bare_e2e_cross_signing_keys_bulk_txn( self, @@ -924,7 +926,7 @@ def _get_e2e_cross_signing_signatures_txn( @cancellable async def get_e2e_cross_signing_keys_bulk( self, user_ids: List[str], from_user_id: Optional[str] = None - ) -> Dict[str, Optional[Dict[str, JsonDict]]]: + ) -> Dict[str, Optional[Mapping[str, JsonDict]]]: """Returns the cross-signing keys for a set of users. 
Args: @@ -940,11 +942,14 @@ async def get_e2e_cross_signing_keys_bulk( result = await self._get_bare_e2e_cross_signing_keys_bulk(user_ids) if from_user_id: - result = await self.db_pool.runInteraction( - "get_e2e_cross_signing_signatures", - self._get_e2e_cross_signing_signatures_txn, - result, - from_user_id, + result = cast( + Dict[str, Optional[Mapping[str, JsonDict]]], + await self.db_pool.runInteraction( + "get_e2e_cross_signing_signatures", + self._get_e2e_cross_signing_signatures_txn, + result, + from_user_id, + ), ) return result diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index bbee02ab18f0..ca780cca36ec 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -22,6 +22,7 @@ Iterable, List, Optional, + Sequence, Set, Tuple, cast, @@ -1004,7 +1005,9 @@ def get_insertion_event_backward_extremities_in_room_txn( room_id, ) - async def get_max_depth_of(self, event_ids: List[str]) -> Tuple[Optional[str], int]: + async def get_max_depth_of( + self, event_ids: Collection[str] + ) -> Tuple[Optional[str], int]: """Returns the event ID and depth for the event that has the max depth from a set of event IDs Args: @@ -1141,7 +1144,7 @@ def _get_rooms_with_many_extremities_txn(txn: LoggingTransaction) -> List[str]: ) @cached(max_entries=5000, iterable=True) - async def get_latest_event_ids_in_room(self, room_id: str) -> List[str]: + async def get_latest_event_ids_in_room(self, room_id: str) -> Sequence[str]: return await self.db_pool.simple_select_onecol( table="event_forward_extremities", keyvalues={"room_id": room_id}, @@ -1171,7 +1174,7 @@ def _get_min_depth_interaction( @cancellable async def get_forward_extremities_for_room_at_stream_ordering( self, room_id: str, stream_ordering: int - ) -> List[str]: + ) -> Sequence[str]: """For a given room_id and stream_ordering, return the forward extremeties of the room at that point in "time". @@ -1204,7 +1207,7 @@ async def get_forward_extremities_for_room_at_stream_ordering( @cached(max_entries=5000, num_args=2) async def _get_forward_extremeties_for_room( self, room_id: str, stream_ordering: int - ) -> List[str]: + ) -> Sequence[str]: """For a given room_id and stream_ordering, return the forward extremeties of the room at that point in "time". diff --git a/synapse/storage/databases/main/monthly_active_users.py b/synapse/storage/databases/main/monthly_active_users.py index db9a24db5ec6..4b1061e6d7d0 100644 --- a/synapse/storage/databases/main/monthly_active_users.py +++ b/synapse/storage/databases/main/monthly_active_users.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, cast +from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, cast from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage.database import ( @@ -95,7 +95,7 @@ def _count_users(txn: LoggingTransaction) -> int: return await self.db_pool.runInteraction("count_users", _count_users) @cached(num_args=0) - async def get_monthly_active_count_by_service(self) -> Dict[str, int]: + async def get_monthly_active_count_by_service(self) -> Mapping[str, int]: """Generates current count of monthly active users broken down by service. A service is typically an appservice but also includes native matrix users. 
Since the `monthly_active_users` table is populated from the `user_ips` table diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 29972d520413..dddf49c2d575 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -21,7 +21,9 @@ Dict, Iterable, List, + Mapping, Optional, + Sequence, Tuple, cast, ) @@ -288,7 +290,7 @@ async def get_linearized_receipts_for_rooms( async def get_linearized_receipts_for_room( self, room_id: str, to_key: int, from_key: Optional[int] = None - ) -> List[dict]: + ) -> Sequence[JsonDict]: """Get receipts for a single room for sending to clients. Args: @@ -311,7 +313,7 @@ async def get_linearized_receipts_for_room( @cached(tree=True) async def _get_linearized_receipts_for_room( self, room_id: str, to_key: int, from_key: Optional[int] = None - ) -> List[JsonDict]: + ) -> Sequence[JsonDict]: """See get_linearized_receipts_for_room""" def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: @@ -354,7 +356,7 @@ def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: ) async def _get_linearized_receipts_for_rooms( self, room_ids: Collection[str], to_key: int, from_key: Optional[int] = None - ) -> Dict[str, List[JsonDict]]: + ) -> Dict[str, Sequence[JsonDict]]: if not room_ids: return {} @@ -416,7 +418,7 @@ def f(txn: LoggingTransaction) -> List[Dict[str, Any]]: ) async def get_linearized_receipts_for_all_rooms( self, to_key: int, from_key: Optional[int] = None - ) -> Dict[str, JsonDict]: + ) -> Mapping[str, JsonDict]: """Get receipts for all rooms between two stream_ids, up to a limit of the latest 100 read receipts. diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 31f0f2bd3d84..9a55e17624f6 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -16,7 +16,7 @@ import logging import random import re -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple, Union, cast import attr @@ -192,7 +192,7 @@ def __init__( ) @cached() - async def get_user_by_id(self, user_id: str) -> Optional[Dict[str, Any]]: + async def get_user_by_id(self, user_id: str) -> Optional[Mapping[str, Any]]: """Deprecated: use get_userinfo_by_id instead""" def get_user_by_id_txn(txn: LoggingTransaction) -> Optional[Dict[str, Any]]: diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 0018d6f7abcb..fa3266c081b5 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -22,6 +22,7 @@ List, Mapping, Optional, + Sequence, Set, Tuple, Union, @@ -171,7 +172,7 @@ async def get_relations_for_event( direction: Direction = Direction.BACKWARDS, from_token: Optional[StreamToken] = None, to_token: Optional[StreamToken] = None, - ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]: + ) -> Tuple[Sequence[_RelatedEvent], Optional[StreamToken]]: """Get a list of relations for an event, ordered by topological ordering. 
Args: @@ -397,7 +398,9 @@ async def event_is_target_of_relation(self, parent_id: str) -> bool: return result is not None @cached() - async def get_aggregation_groups_for_event(self, event_id: str) -> List[JsonDict]: + async def get_aggregation_groups_for_event( + self, event_id: str + ) -> Sequence[JsonDict]: raise NotImplementedError() @cachedList( diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index ea6a5e2f3495..694a5b802c7c 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -24,6 +24,7 @@ List, Mapping, Optional, + Sequence, Set, Tuple, Union, @@ -153,7 +154,7 @@ def _transact(txn: LoggingTransaction) -> int: return self._known_servers_count @cached(max_entries=100000, iterable=True) - async def get_users_in_room(self, room_id: str) -> List[str]: + async def get_users_in_room(self, room_id: str) -> Sequence[str]: """Returns a list of users in the room. Will return inaccurate results for rooms with partial state, since the state for @@ -190,9 +191,7 @@ def get_users_in_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[s ) @cached() - def get_user_in_room_with_profile( - self, room_id: str, user_id: str - ) -> Dict[str, ProfileInfo]: + def get_user_in_room_with_profile(self, room_id: str, user_id: str) -> ProfileInfo: raise NotImplementedError() @cachedList( @@ -246,7 +245,7 @@ def _get_subset_users_in_room_with_profiles( @cached(max_entries=100000, iterable=True) async def get_users_in_room_with_profiles( self, room_id: str - ) -> Dict[str, ProfileInfo]: + ) -> Mapping[str, ProfileInfo]: """Get a mapping from user ID to profile information for all users in a given room. The profile information comes directly from this room's `m.room.member` @@ -285,7 +284,7 @@ def _get_users_in_room_with_profiles( ) @cached(max_entries=100000) - async def get_room_summary(self, room_id: str) -> Dict[str, MemberSummary]: + async def get_room_summary(self, room_id: str) -> Mapping[str, MemberSummary]: """Get the details of a room roughly suitable for use by the room summary extension to /sync. Useful when lazy loading room members. Args: @@ -357,7 +356,7 @@ async def get_number_joined_users_in_room(self, room_id: str) -> int: @cached() async def get_invited_rooms_for_local_user( self, user_id: str - ) -> List[RoomsForUser]: + ) -> Sequence[RoomsForUser]: """Get all the rooms the *local* user is invited to. Args: @@ -475,7 +474,7 @@ def _get_rooms_for_local_user_where_membership_is_txn( return results @cached(iterable=True) - async def get_local_users_in_room(self, room_id: str) -> List[str]: + async def get_local_users_in_room(self, room_id: str) -> Sequence[str]: """ Retrieves a list of the current roommembers who are local to the server. 
""" @@ -791,7 +790,7 @@ async def get_users_who_share_room_with_user(self, user_id: str) -> Set[str]: """Returns the set of users who share a room with `user_id`""" room_ids = await self.get_rooms_for_user(user_id) - user_who_share_room = set() + user_who_share_room: Set[str] = set() for room_id in room_ids: user_ids = await self.get_users_in_room(room_id) user_who_share_room.update(user_ids) @@ -953,7 +952,7 @@ async def _check_host_room_membership( return True @cached(iterable=True, max_entries=10000) - async def get_current_hosts_in_room(self, room_id: str) -> Set[str]: + async def get_current_hosts_in_room(self, room_id: str) -> AbstractSet[str]: """Get current hosts in room based on current state.""" # First we check if we already have `get_users_in_room` in the cache, as diff --git a/synapse/storage/databases/main/signatures.py b/synapse/storage/databases/main/signatures.py index 05da15074a73..5dcb1fc0b5f4 100644 --- a/synapse/storage/databases/main/signatures.py +++ b/synapse/storage/databases/main/signatures.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Collection, Dict, List, Tuple +from typing import Collection, Dict, List, Mapping, Tuple from unpaddedbase64 import encode_base64 @@ -26,7 +26,7 @@ class SignatureWorkerStore(EventsWorkerStore): @cached() - def get_event_reference_hash(self, event_id: str) -> Dict[str, Dict[str, bytes]]: + def get_event_reference_hash(self, event_id: str) -> Mapping[str, bytes]: # This is a dummy function to allow get_event_reference_hashes # to use its cache raise NotImplementedError() @@ -36,7 +36,7 @@ def get_event_reference_hash(self, event_id: str) -> Dict[str, Dict[str, bytes]] ) async def get_event_reference_hashes( self, event_ids: Collection[str] - ) -> Dict[str, Dict[str, bytes]]: + ) -> Mapping[str, Mapping[str, bytes]]: """Get all hashes for given events. Args: diff --git a/synapse/storage/databases/main/tags.py b/synapse/storage/databases/main/tags.py index d5500cdd470c..c149a9eacba7 100644 --- a/synapse/storage/databases/main/tags.py +++ b/synapse/storage/databases/main/tags.py @@ -15,7 +15,7 @@ # limitations under the License. import logging -from typing import Any, Dict, Iterable, List, Tuple, cast +from typing import Any, Dict, Iterable, List, Mapping, Tuple, cast from synapse.api.constants import AccountDataTypes from synapse.replication.tcp.streams import AccountDataStream @@ -32,7 +32,9 @@ class TagsWorkerStore(AccountDataWorkerStore): @cached() - async def get_tags_for_user(self, user_id: str) -> Dict[str, Dict[str, JsonDict]]: + async def get_tags_for_user( + self, user_id: str + ) -> Mapping[str, Mapping[str, JsonDict]]: """Get all the tags for a user. 
@@ -107,7 +109,7 @@ def get_all_updated_tags_txn( async def get_updated_tags( self, user_id: str, stream_id: int - ) -> Dict[str, Dict[str, JsonDict]]: + ) -> Mapping[str, Mapping[str, JsonDict]]: """Get all the tags for the rooms where the tags have changed since the given version diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 14ef5b040db1..f6a6fd4079ec 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -16,9 +16,9 @@ import re from typing import ( TYPE_CHECKING, - Dict, Iterable, List, + Mapping, Optional, Sequence, Set, @@ -586,7 +586,7 @@ def _delete_all_from_user_dir_txn(txn: LoggingTransaction) -> None: ) @cached() - async def get_user_in_directory(self, user_id: str) -> Optional[Dict[str, str]]: + async def get_user_in_directory(self, user_id: str) -> Optional[Mapping[str, str]]: return await self.db_pool.simple_select_one( table="user_directory", keyvalues={"user_id": user_id}, diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index a2f347f666e8..f71ff46d8777 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import List +from typing import List, Sequence from twisted.test.proto_helpers import MemoryReactor @@ -558,7 +558,7 @@ def test_update_notice_user_avatar_when_changed(self) -> None: def _check_invite_and_join_status( self, user_id: str, expected_invites: int, expected_memberships: int - ) -> List[RoomsForUser]: + ) -> Sequence[RoomsForUser]: """Check invite and room membership status of a user. Args From 6cddf24e361fe43f086307c833cd814dc03363b6 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Sat, 11 Feb 2023 00:31:05 +0100 Subject: [PATCH 173/344] Faster joins: don't stall when a user joins during a fast join (#14606) Fixes #12801. Complement tests are at https://github.com/matrix-org/complement/pull/567. Avoid blocking on full state when handling a subsequent join into a partial state room. Also always perform a remote join into partial state rooms, since we do not know whether the joining user has been banned and want to avoid leaking history to banned users. Signed-off-by: Mathieu Velten Co-authored-by: Sean Quah Co-authored-by: David Robertson --- changelog.d/14606.misc | 1 + synapse/api/errors.py | 22 +++++ synapse/federation/federation_server.py | 2 +- synapse/handlers/event_auth.py | 16 +-- synapse/handlers/federation.py | 2 +- synapse/handlers/federation_event.py | 59 ++++++++++-- synapse/handlers/message.py | 2 +- synapse/handlers/room.py | 2 +- synapse/handlers/room_member.py | 118 ++++++++++++++++------- synapse/handlers/room_member_worker.py | 5 +- synapse/storage/databases/main/events.py | 21 +--- tests/handlers/test_federation.py | 40 ++++---- 12 files changed, 196 insertions(+), 94 deletions(-) create mode 100644 changelog.d/14606.misc diff --git a/changelog.d/14606.misc b/changelog.d/14606.misc new file mode 100644 index 000000000000..e2debc96d8d4 --- /dev/null +++ b/changelog.d/14606.misc @@ -0,0 +1 @@ +Faster joins: don't stall when another user joins during a fast join resync. 
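The diffs that follow implement the behaviour described in the commit message. Stripped of the surrounding machinery, the new join decision is roughly the following (a simplified sketch, not the actual Synapse code; the real logic lives in `_should_perform_remote_join` further down and also weighs restricted join rules and whether a local user can issue an invite). `PartialStateConflictError`, which the next hunk moves into `synapse.api.errors`, covers the related race: if the room is un-partial-stated while an event is being persisted, the caller is expected to recompute the event context and retry.

    from typing import Optional

    def should_remote_join_sketch(
        is_partial_state_room: bool,
        previous_membership: Optional[str],
        is_host_in_room: bool,
    ) -> bool:
        # No local user is in the room at all: a remote join is the only option.
        if not is_host_in_room:
            return True
        # Our copy of the room state is still partial, so we cannot prove the
        # joining user is not banned; send the join via a resident server with
        # the full state rather than guess and leak history to a banned user.
        if is_partial_state_room and previous_membership != "join":  # Membership.JOIN
            return True
        # Otherwise the pre-existing local-vs-remote checks apply unchanged.
        return False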
diff --git a/synapse/api/errors.py b/synapse/api/errors.py index c2c177fd71d0..9235ce653638 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -751,3 +751,25 @@ class ModuleFailedException(Exception): Raised when a module API callback fails, for example because it raised an exception. """ + + +class PartialStateConflictError(SynapseError): + """An internal error raised when attempting to persist an event with partial state + after the room containing the event has been un-partial stated. + + This error should be handled by recomputing the event context and trying again. + + This error has an HTTP status code so that it can be transported over replication. + It should not be exposed to clients. + """ + + @staticmethod + def message() -> str: + return "Cannot persist partial state event in un-partial stated room" + + def __init__(self) -> None: + super().__init__( + HTTPStatus.CONFLICT, + msg=PartialStateConflictError.message(), + errcode=Codes.UNKNOWN, + ) diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 6addc0bb6521..6d99845de561 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -48,6 +48,7 @@ FederationError, IncompatibleRoomVersionError, NotFoundError, + PartialStateConflictError, SynapseError, UnsupportedRoomVersionError, ) @@ -81,7 +82,6 @@ ReplicationFederationSendEduRestServlet, ReplicationGetQueryRestServlet, ) -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.lock import Lock from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index a23a8ce2a167..46dd63c3f00d 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -202,7 +202,7 @@ async def check_restricted_join_rules( state_ids: StateMap[str], room_version: RoomVersion, user_id: str, - prev_member_event: Optional[EventBase], + prev_membership: Optional[str], ) -> None: """ Check whether a user can join a room without an invite due to restricted join rules. @@ -214,15 +214,14 @@ async def check_restricted_join_rules( state_ids: The state of the room as it currently is. room_version: The room version of the room being joined. user_id: The user joining the room. - prev_member_event: The current membership event for this user. + prev_membership: The current membership state for this user. `None` if the + user has never joined the room (equivalent to "leave"). Raises: AuthError if the user cannot join the room. """ # If the member is invited or currently joined, then nothing to do. - if prev_member_event and ( - prev_member_event.membership in (Membership.JOIN, Membership.INVITE) - ): + if prev_membership in (Membership.JOIN, Membership.INVITE): return # This is not a room with a restricted join rule, so we don't need to do the @@ -255,13 +254,14 @@ async def check_restricted_join_rules( ) async def has_restricted_join_rules( - self, state_ids: StateMap[str], room_version: RoomVersion + self, partial_state_ids: StateMap[str], room_version: RoomVersion ) -> bool: """ Return if the room has the proper join rules set for access via rooms. Args: - state_ids: The state of the room as it currently is. + state_ids: The state of the room as it currently is. May be full or partial + state. room_version: The room version of the room to query. 
Returns: @@ -272,7 +272,7 @@ async def has_restricted_join_rules( return False # If there's no join rule, then it defaults to invite (so this doesn't apply). - join_rules_event_id = state_ids.get((EventTypes.JoinRules, ""), None) + join_rules_event_id = partial_state_ids.get((EventTypes.JoinRules, ""), None) if not join_rules_event_id: return False diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 43ed4a3dd1f1..08727e485741 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -49,6 +49,7 @@ FederationPullAttemptBackoffError, HttpResponseException, NotFoundError, + PartialStateConflictError, RequestSendFailed, SynapseError, ) @@ -68,7 +69,6 @@ ReplicationCleanRoomRestServlet, ReplicationStoreRoomOnOutlierMembershipRestServlet, ) -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import JsonDict, StrCollection, get_domain_from_id from synapse.types.state import StateFilter diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 3561f2f1de5e..b7136f8d1ccb 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -47,6 +47,7 @@ FederationError, FederationPullAttemptBackoffError, HttpResponseException, + PartialStateConflictError, RequestSendFailed, SynapseError, ) @@ -74,7 +75,6 @@ ReplicationFederationSendEventsRestServlet, ) from synapse.state import StateResolutionStore -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( PersistedEventPosition, @@ -441,16 +441,17 @@ async def check_join_restrictions( # Check if the user is already in the room or invited to the room. user_id = event.state_key prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None) - prev_member_event = None + prev_membership = None if prev_member_event_id: prev_member_event = await self._store.get_event(prev_member_event_id) + prev_membership = prev_member_event.membership # Check if the member should be allowed access via membership in a space. await self._event_auth_handler.check_restricted_join_rules( prev_state_ids, event.room_version, user_id, - prev_member_event, + prev_membership, ) @trace @@ -526,11 +527,57 @@ async def process_remote_join( "Peristing join-via-remote %s (partial_state: %s)", event, partial_state ) with nested_logging_context(suffix=event.event_id): + if partial_state: + # When handling a second partial state join into a partial state room, + # the returned state will exclude the membership from the first join. To + # preserve prior memberships, we try to compute the partial state before + # the event ourselves if we know about any of the prev events. + # + # When we don't know about any of the prev events, it's fine to just use + # the returned state, since the new join will create a new forward + # extremity, and leave the forward extremity containing our prior + # memberships alone. + prev_event_ids = set(event.prev_event_ids()) + seen_event_ids = await self._store.have_events_in_timeline( + prev_event_ids + ) + missing_event_ids = prev_event_ids - seen_event_ids + + state_maps_to_resolve: List[StateMap[str]] = [] + + # Fetch the state after the prev events that we know about. 
+ state_maps_to_resolve.extend( + ( + await self._state_storage_controller.get_state_groups_ids( + room_id, seen_event_ids, await_full_state=False + ) + ).values() + ) + + # When there are prev events we do not have the state for, we state + # resolve with the state returned by the remote homeserver. + if missing_event_ids or len(state_maps_to_resolve) == 0: + state_maps_to_resolve.append( + {(e.type, e.state_key): e.event_id for e in state} + ) + + state_ids_before_event = ( + await self._state_resolution_handler.resolve_events_with_store( + event.room_id, + room_version.identifier, + state_maps_to_resolve, + event_map=None, + state_res_store=StateResolutionStore(self._store), + ) + ) + else: + state_ids_before_event = { + (e.type, e.state_key): e.event_id for e in state + } + context = await self._state_handler.compute_event_context( event, - state_ids_before_event={ - (e.type, e.state_key): e.event_id for e in state - }, + state_ids_before_event=state_ids_before_event, partial_state=partial_state, ) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 3e30f52e4d9f..8f5b658d9d28 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -38,6 +38,7 @@ Codes, ConsentNotGivenError, NotFoundError, + PartialStateConflictError, ShadowBanError, SynapseError, UnstableSpecAuthError, @@ -57,7 +58,6 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.replication.http.send_event import ReplicationSendEventRestServlet from synapse.replication.http.send_events import ReplicationSendEventsRestServlet -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.types import ( MutableStateMap, diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 060bbcb18177..837dabb3b779 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -43,6 +43,7 @@ Codes, LimitExceededError, NotFoundError, + PartialStateConflictError, StoreError, SynapseError, ) @@ -54,7 +55,6 @@ from synapse.handlers.relations import BundledAggregations from synapse.module_api import NOT_SPAM from synapse.rest.admin._base import assert_user_is_admin -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.streams import EventSource from synapse.types import ( JsonDict, diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 6e7141d2ef53..a965c7ec7698 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -26,7 +26,13 @@ GuestAccess, Membership, ) -from synapse.api.errors import AuthError, Codes, ShadowBanError, SynapseError +from synapse.api.errors import ( + AuthError, + Codes, + PartialStateConflictError, + ShadowBanError, + SynapseError, +) from synapse.api.ratelimiting import Ratelimiter from synapse.event_auth import get_named_level, get_power_level_event from synapse.events import EventBase @@ -34,7 +40,6 @@ from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN from synapse.logging import opentracing from synapse.module_api import NOT_SPAM -from synapse.storage.databases.main.events import PartialStateConflictError from synapse.types import ( JsonDict, Requester, @@ -56,6 +61,13 @@ logger = logging.getLogger(__name__) +class NoKnownServersError(SynapseError): + """No server already resident to the room was provided to the join/knock operation.""" + + def __init__(self, msg: str = "No known 
servers"): + super().__init__(404, msg) + + class RoomMemberHandler(metaclass=abc.ABCMeta): # TODO(paul): This handler currently contains a messy conflation of # low-level API that works on UserID objects and so on, and REST-level @@ -185,6 +197,10 @@ async def _remote_join( room_id: Room that we are trying to join user: User who is trying to join content: A dict that should be used as the content of the join event. + + Raises: + NoKnownServersError: if remote_room_hosts does not contain a server joined to + the room. """ raise NotImplementedError() @@ -823,14 +839,19 @@ async def update_membership_locked( latest_event_ids = await self.store.get_prev_events_for_room(room_id) - state_before_join = await self.state_handler.compute_state_after_events( - room_id, latest_event_ids + is_partial_state_room = await self.store.is_partial_state_room(room_id) + partial_state_before_join = await self.state_handler.compute_state_after_events( + room_id, latest_event_ids, await_full_state=False ) + # `is_partial_state_room` also indicates whether `partial_state_before_join` is + # partial. # TODO: Refactor into dictionary of explicitly allowed transitions # between old and new state, with specific error messages for some # transitions and generic otherwise - old_state_id = state_before_join.get((EventTypes.Member, target.to_string())) + old_state_id = partial_state_before_join.get( + (EventTypes.Member, target.to_string()) + ) if old_state_id: old_state = await self.store.get_event(old_state_id, allow_none=True) old_membership = old_state.content.get("membership") if old_state else None @@ -881,11 +902,11 @@ async def update_membership_locked( if action == "kick": raise AuthError(403, "The target user is not in the room") - is_host_in_room = await self._is_host_in_room(state_before_join) + is_host_in_room = await self._is_host_in_room(partial_state_before_join) if effective_membership_state == Membership.JOIN: if requester.is_guest: - guest_can_join = await self._can_guest_join(state_before_join) + guest_can_join = await self._can_guest_join(partial_state_before_join) if not guest_can_join: # This should be an auth check, but guests are a local concept, # so don't really fit into the general auth process. @@ -927,8 +948,9 @@ async def update_membership_locked( room_id, remote_room_hosts, content, + is_partial_state_room, is_host_in_room, - state_before_join, + partial_state_before_join, ) if remote_join: if ratelimit: @@ -1073,8 +1095,9 @@ async def _should_perform_remote_join( room_id: str, remote_room_hosts: List[str], content: JsonDict, + is_partial_state_room: bool, is_host_in_room: bool, - state_before_join: StateMap[str], + partial_state_before_join: StateMap[str], ) -> Tuple[bool, List[str]]: """ Check whether the server should do a remote join (as opposed to a local @@ -1093,9 +1116,12 @@ async def _should_perform_remote_join( remote_room_hosts: A list of remote room hosts. content: The content to use as the event body of the join. This may be modified. - is_host_in_room: True if the host is in the room. - state_before_join: The state before the join event (i.e. the resolution of - the states after its parent events). + is_partial_state_room: `True` if the server currently doesn't hold the full + state of the room. + is_host_in_room: `True` if the host is in the room. + partial_state_before_join: The state before the join event (i.e. the + resolution of the states after its parent events). May be full or + partial state, depending on `is_partial_state_room`. 
Returns: A tuple of: @@ -1109,6 +1135,23 @@ async def _should_perform_remote_join( if not is_host_in_room: return True, remote_room_hosts + prev_member_event_id = partial_state_before_join.get( + (EventTypes.Member, user_id), None + ) + previous_membership = None + if prev_member_event_id: + prev_member_event = await self.store.get_event(prev_member_event_id) + previous_membership = prev_member_event.membership + + # If we are not fully joined yet, and the target is not already in the room, + # let's do a remote join so another server with the full state can validate + # that the user has not been banned for example. + # We could just accept the join and wait for state res to resolve that later on + # but we would then leak room history to this person until then, which is pretty + # bad. + if is_partial_state_room and previous_membership != Membership.JOIN: + return True, remote_room_hosts + # If the host is in the room, but not one of the authorised hosts # for restricted join rules, a remote join must be used. room_version = await self.store.get_room_version(room_id) @@ -1116,21 +1159,19 @@ async def _should_perform_remote_join( # If restricted join rules are not being used, a local join can always # be used. if not await self.event_auth_handler.has_restricted_join_rules( - state_before_join, room_version + partial_state_before_join, room_version ): return False, [] # If the user is invited to the room or already joined, the join # event can always be issued locally. - prev_member_event_id = state_before_join.get((EventTypes.Member, user_id), None) - prev_member_event = None - if prev_member_event_id: - prev_member_event = await self.store.get_event(prev_member_event_id) - if prev_member_event.membership in ( - Membership.JOIN, - Membership.INVITE, - ): - return False, [] + if previous_membership in (Membership.JOIN, Membership.INVITE): + return False, [] + + # All the partial state cases are covered above. We have been given the full + # state of the room. + assert not is_partial_state_room + state_before_join = partial_state_before_join # If the local host has a user who can issue invites, then a local # join can be done. @@ -1154,7 +1195,7 @@ async def _should_perform_remote_join( # Ensure the member should be allowed access via membership in a room. await self.event_auth_handler.check_restricted_join_rules( - state_before_join, room_version, user_id, prev_member_event + state_before_join, room_version, user_id, previous_membership ) # If this is going to be a local join, additional information must @@ -1304,11 +1345,17 @@ async def send_membership_event( if prev_member_event.membership == Membership.JOIN: await self._user_left_room(target_user, room_id) - async def _can_guest_join(self, current_state_ids: StateMap[str]) -> bool: + async def _can_guest_join(self, partial_current_state_ids: StateMap[str]) -> bool: """ Returns whether a guest can join a room based on its current state. + + Args: + partial_current_state_ids: The current state of the room. May be full or + partial state. 
""" - guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None) + guest_access_id = partial_current_state_ids.get( + (EventTypes.GuestAccess, ""), None + ) if not guest_access_id: return False @@ -1634,19 +1681,25 @@ async def _make_and_store_3pid_invite( ) return event, stream_id - async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool: + async def _is_host_in_room(self, partial_current_state_ids: StateMap[str]) -> bool: + """Returns whether the homeserver is in the room based on its current state. + + Args: + partial_current_state_ids: The current state of the room. May be full or + partial state. + """ # Have we just created the room, and is this about to be the very # first member event? - create_event_id = current_state_ids.get(("m.room.create", "")) - if len(current_state_ids) == 1 and create_event_id: + create_event_id = partial_current_state_ids.get(("m.room.create", "")) + if len(partial_current_state_ids) == 1 and create_event_id: # We can only get here if we're in the process of creating the room return True - for etype, state_key in current_state_ids: + for etype, state_key in partial_current_state_ids: if etype != EventTypes.Member or not self.hs.is_mine_id(state_key): continue - event_id = current_state_ids[(etype, state_key)] + event_id = partial_current_state_ids[(etype, state_key)] event = await self.store.get_event(event_id, allow_none=True) if not event: continue @@ -1715,8 +1768,7 @@ async def _remote_join( ] if len(remote_room_hosts) == 0: - raise SynapseError( - 404, + raise NoKnownServersError( "Can't join remote room because no servers " "that are in the room have been provided.", ) @@ -1947,7 +1999,7 @@ async def remote_knock( ] if len(remote_room_hosts) == 0: - raise SynapseError(404, "No known servers") + raise NoKnownServersError() return await self.federation_handler.do_knock( remote_room_hosts, room_id, user.to_string(), content=content diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 221552a2a64b..ba261702d4bc 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -15,8 +15,7 @@ import logging from typing import TYPE_CHECKING, List, Optional, Tuple -from synapse.api.errors import SynapseError -from synapse.handlers.room_member import RoomMemberHandler +from synapse.handlers.room_member import NoKnownServersError, RoomMemberHandler from synapse.replication.http.membership import ( ReplicationRemoteJoinRestServlet as ReplRemoteJoin, ReplicationRemoteKnockRestServlet as ReplRemoteKnock, @@ -52,7 +51,7 @@ async def _remote_join( ) -> Tuple[str, int]: """Implements RoomMemberHandler._remote_join""" if len(remote_room_hosts) == 0: - raise SynapseError(404, "No known servers") + raise NoKnownServersError() ret = await self._remote_join_client( requester=requester, diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index cb66376fb4bf..ffe766fd5646 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -16,7 +16,6 @@ import itertools import logging from collections import OrderedDict -from http import HTTPStatus from typing import ( TYPE_CHECKING, Any, @@ -36,7 +35,7 @@ import synapse.metrics from synapse.api.constants import EventContentFields, EventTypes, RelationTypes -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import PartialStateConflictError from synapse.api.room_versions import RoomVersions from 
synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext @@ -72,24 +71,6 @@ ) -class PartialStateConflictError(SynapseError): - """An internal error raised when attempting to persist an event with partial state - after the room containing the event has been un-partial stated. - - This error should be handled by recomputing the event context and trying again. - - This error has an HTTP status code so that it can be transported over replication. - It should not be exposed to clients. - """ - - def __init__(self) -> None: - super().__init__( - HTTPStatus.CONFLICT, - msg="Cannot persist partial state event in un-partial stated room", - errcode=Codes.UNKNOWN, - ) - - @attr.s(slots=True, auto_attribs=True) class DeltaState: """Deltas to use to update the `current_state_events` table. diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 57675fa407e4..5868eb2da766 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -575,26 +575,6 @@ def test_failed_partial_join_is_clean(self) -> None: fed_client = fed_handler.federation_client room_id = "!room:example.com" - membership_event = make_event_from_dict( - { - "room_id": room_id, - "type": "m.room.member", - "sender": "@alice:test", - "state_key": "@alice:test", - "content": {"membership": "join"}, - }, - RoomVersions.V10, - ) - - mock_make_membership_event = Mock( - return_value=make_awaitable( - ( - "example.com", - membership_event, - RoomVersions.V10, - ) - ) - ) EVENT_CREATE = make_event_from_dict( { @@ -640,6 +620,26 @@ def test_failed_partial_join_is_clean(self) -> None: }, room_version=RoomVersions.V10, ) + membership_event = make_event_from_dict( + { + "room_id": room_id, + "type": "m.room.member", + "sender": "@alice:test", + "state_key": "@alice:test", + "content": {"membership": "join"}, + "prev_events": [EVENT_INVITATION_MEMBERSHIP.event_id], + }, + RoomVersions.V10, + ) + mock_make_membership_event = Mock( + return_value=make_awaitable( + ( + "example.com", + membership_event, + RoomVersions.V10, + ) + ) + ) mock_send_join = Mock( return_value=make_awaitable( SendJoinResult( From e0bc331a94997385c5d2345424156ee862bffec5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 09:18:07 +0000 Subject: [PATCH 174/344] Bump bleach from 5.0.1 to 6.0.0 (#15059) * Bump bleach from 5.0.1 to 6.0.0 Bumps [bleach](https://github.com/mozilla/bleach) from 5.0.1 to 6.0.0. - [Release notes](https://github.com/mozilla/bleach/releases) - [Changelog](https://github.com/mozilla/bleach/blob/main/CHANGES) - [Commits](https://github.com/mozilla/bleach/compare/v5.0.1...v6.0.0) --- updated-dependencies: - dependency-name: bleach dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15059.misc | 1 + poetry.lock | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15059.misc diff --git a/changelog.d/15059.misc b/changelog.d/15059.misc new file mode 100644 index 000000000000..e962b208fdf5 --- /dev/null +++ b/changelog.d/15059.misc @@ -0,0 +1 @@ +Bump bleach from 5.0.1 to 6.0.0. 
diff --git a/poetry.lock b/poetry.lock index 7274f8a53754..9009c29d4594 100644 --- a/poetry.lock +++ b/poetry.lock @@ -127,14 +127,14 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "bleach" -version = "5.0.1" +version = "6.0.0" description = "An easy safelist-based HTML-sanitizing tool." category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "bleach-5.0.1-py3-none-any.whl", hash = "sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a"}, - {file = "bleach-5.0.1.tar.gz", hash = "sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c"}, + {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, + {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, ] [package.dependencies] @@ -143,7 +143,6 @@ webencodings = "*" [package.extras] css = ["tinycss2 (>=1.1.0,<1.2)"] -dev = ["Sphinx (==4.3.2)", "black (==22.3.0)", "build (==0.8.0)", "flake8 (==4.0.1)", "hashin (==0.17.0)", "mypy (==0.961)", "pip-tools (==6.6.2)", "pytest (==7.1.2)", "tox (==3.25.0)", "twine (==4.0.1)", "wheel (==0.37.1)"] [[package]] name = "canonicaljson" @@ -358,6 +357,8 @@ files = [ {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"}, {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"}, {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"}, + {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"}, + {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"}, {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"}, {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"}, {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"}, From 81497c752b8a0a5ce4b0d5d6f4ff7813b3609b6b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 09:18:26 +0000 Subject: [PATCH 175/344] Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3 (#15060) * Bump dtolnay/rust-toolchain Bumps [dtolnay/rust-toolchain](https://github.com/dtolnay/rust-toolchain) from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. - [Release notes](https://github.com/dtolnay/rust-toolchain/releases) - [Commits](https://github.com/dtolnay/rust-toolchain/compare/9cd00a88a73addc8617065438eff914dd08d0955...25dc93b901a87e864900a8aec6c12e9aa794c0c3) --- updated-dependencies: - dependency-name: dtolnay/rust-toolchain dependency-type: direct:production ... 
Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/latest_deps.yml | 6 +++--- .github/workflows/tests.yml | 18 +++++++++--------- .github/workflows/twisted_trunk.yml | 6 +++--- changelog.d/15060.misc | 1 + 4 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15060.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 99fc2cee08c4..8485daf87ffe 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -61,7 +61,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -134,7 +134,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e945ffe7f3ca..94f7f2657c86 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -112,7 +112,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: 1.58.1 components: clippy @@ -134,7 +134,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: nightly-2022-12-01 components: clippy @@ -154,7 +154,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: 1.58.1 components: rustfmt @@ -221,7 +221,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. 
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -266,7 +266,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -386,7 +386,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -531,7 +531,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -562,7 +562,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -585,7 +585,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. 
- uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: nightly-2022-12-01 - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index a59c8dac09b8..5654d2f3e2ad 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -43,7 +43,7 @@ jobs: - run: sudo apt-get -qq install xmlsec1 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -82,7 +82,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@9cd00a88a73addc8617065438eff914dd08d0955 + uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/changelog.d/15060.misc b/changelog.d/15060.misc new file mode 100644 index 000000000000..5b99e0600359 --- /dev/null +++ b/changelog.d/15060.misc @@ -0,0 +1 @@ +Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. From ede0b219ebe0bb7a5d411a17b89e700c6a2edd34 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 09:18:50 +0000 Subject: [PATCH 176/344] Bump systemd-python from 234 to 235 (#15061) * Bump systemd-python from 234 to 235 Bumps [systemd-python](https://github.com/systemd/python-systemd) from 234 to 235. - [Release notes](https://github.com/systemd/python-systemd/releases) - [Changelog](https://github.com/systemd/python-systemd/blob/main/NEWS) - [Commits](https://github.com/systemd/python-systemd/compare/v234...v235) --- updated-dependencies: - dependency-name: systemd-python dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15061.misc | 1 + poetry.lock | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15061.misc diff --git a/changelog.d/15061.misc b/changelog.d/15061.misc new file mode 100644 index 000000000000..40017827a266 --- /dev/null +++ b/changelog.d/15061.misc @@ -0,0 +1 @@ +Bump systemd-python from 234 to 235. 
diff --git a/poetry.lock b/poetry.lock index 9009c29d4594..6e2cc830867c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2253,13 +2253,13 @@ files = [ [[package]] name = "systemd-python" -version = "234" +version = "235" description = "Python interface for libsystemd" category = "main" optional = true python-versions = "*" files = [ - {file = "systemd-python-234.tar.gz", hash = "sha256:fd0e44bf70eadae45aadc292cb0a7eb5b0b6372cd1b391228047d33895db83e7"}, + {file = "systemd-python-235.tar.gz", hash = "sha256:4e57f39797fd5d9e2d22b8806a252d7c0106c936039d1e71c8c6b8008e695c0a"}, ] [[package]] From 02db6cfd286d279d796e2ceb6bcb4b2ae5b2e501 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 09:19:18 +0000 Subject: [PATCH 177/344] Bump serde_json from 1.0.92 to 1.0.93 (#15062) * Bump serde_json from 1.0.92 to 1.0.93 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.92 to 1.0.93. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.92...v1.0.93) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 4 ++-- changelog.d/15062.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15062.misc diff --git a/Cargo.lock b/Cargo.lock index a9219eac11e3..d44191ff3835 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,9 +343,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7434af0dc1cbd59268aa98b4c22c131c0584d2232f6fb166efb993e2832e896a" +checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" dependencies = [ "itoa", "ryu", diff --git a/changelog.d/15062.misc b/changelog.d/15062.misc new file mode 100644 index 000000000000..adc194063026 --- /dev/null +++ b/changelog.d/15062.misc @@ -0,0 +1 @@ +Bump serde_json from 1.0.92 to 1.0.93. From fa7bbd05e27a8651a9b11f1d15805090c115d64a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 09:19:28 +0000 Subject: [PATCH 178/344] Bump types-requests from 2.28.11.8 to 2.28.11.12 (#15063) * Bump types-requests from 2.28.11.8 to 2.28.11.12 Bumps [types-requests](https://github.com/python/typeshed) from 2.28.11.8 to 2.28.11.12. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-requests dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15063.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15063.misc diff --git a/changelog.d/15063.misc b/changelog.d/15063.misc new file mode 100644 index 000000000000..b52e1faed0f4 --- /dev/null +++ b/changelog.d/15063.misc @@ -0,0 +1 @@ +Bump types-requests from 2.28.11.8 to 2.28.11.12. 
diff --git a/poetry.lock b/poetry.lock index 6e2cc830867c..cef00b233699 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2695,14 +2695,14 @@ files = [ [[package]] name = "types-requests" -version = "2.28.11.8" +version = "2.28.11.12" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.28.11.8.tar.gz", hash = "sha256:e67424525f84adfbeab7268a159d3c633862dafae15c5b19547ce1b55954f0a3"}, - {file = "types_requests-2.28.11.8-py3-none-any.whl", hash = "sha256:61960554baca0008ae7e2db2bd3b322ca9a144d3e80ce270f5fb640817e40994"}, + {file = "types-requests-2.28.11.12.tar.gz", hash = "sha256:fd530aab3fc4f05ee36406af168f0836e6f00f1ee51a0b96b7311f82cb675230"}, + {file = "types_requests-2.28.11.12-py3-none-any.whl", hash = "sha256:dbc2933635860e553ffc59f5e264264981358baffe6342b925e3eb8261f866ee"}, ] [package.dependencies] From 14406d1a08237205a5d19666d006478caeb008b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 09:19:39 +0000 Subject: [PATCH 179/344] Bump types-pillow from 9.4.0.5 to 9.4.0.10 (#15064) * Bump types-pillow from 9.4.0.5 to 9.4.0.10 Bumps [types-pillow](https://github.com/python/typeshed) from 9.4.0.5 to 9.4.0.10. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pillow dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15064.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15064.misc diff --git a/changelog.d/15064.misc b/changelog.d/15064.misc new file mode 100644 index 000000000000..644d4bb2304f --- /dev/null +++ b/changelog.d/15064.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.4.0.5 to 9.4.0.10. diff --git a/poetry.lock b/poetry.lock index cef00b233699..32c7801dd618 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2644,14 +2644,14 @@ files = [ [[package]] name = "types-pillow" -version = "9.4.0.5" +version = "9.4.0.10" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.4.0.5.tar.gz", hash = "sha256:941cefaac2f5297d7d2a9989633c95b4063112690dc21c965d46bd5a7fff3c76"}, - {file = "types_Pillow-9.4.0.5-py3-none-any.whl", hash = "sha256:a1d2b3e070b4d852af04f76f018d12bd51abb4abca3b725d91b35e01cda7a2de"}, + {file = "types-Pillow-9.4.0.10.tar.gz", hash = "sha256:341c2345610bba452d1724757c7b997a60f593cf003c101ba239db003a0ae389"}, + {file = "types_Pillow-9.4.0.10-py3-none-any.whl", hash = "sha256:302ce81cfb61aacc8983a3a2ec682cbef66522a2fe8e640f648ac2e3d6f6af53"}, ] [[package]] From 5e1b21e1527d77577d2e5061e9887cc5e6819287 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Feb 2023 09:19:56 +0000 Subject: [PATCH 180/344] Bump sentry-sdk from 1.13.0 to 1.15.0 (#15065) * Bump sentry-sdk from 1.13.0 to 1.15.0 Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 1.13.0 to 1.15.0. 
- [Release notes](https://github.com/getsentry/sentry-python/releases) - [Changelog](https://github.com/getsentry/sentry-python/blob/master/CHANGELOG.md) - [Commits](https://github.com/getsentry/sentry-python/compare/1.13.0...1.15.0) --- updated-dependencies: - dependency-name: sentry-sdk dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15065.misc | 1 + poetry.lock | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15065.misc diff --git a/changelog.d/15065.misc b/changelog.d/15065.misc new file mode 100644 index 000000000000..df2f9a773ee7 --- /dev/null +++ b/changelog.d/15065.misc @@ -0,0 +1 @@ +Bump sentry-sdk from 1.13.0 to 1.15.0. diff --git a/poetry.lock b/poetry.lock index 32c7801dd618..688fd2ac26f9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2026,14 +2026,14 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "1.13.0" +version = "1.15.0" description = "Python client for Sentry (https://sentry.io)" category = "main" optional = true python-versions = "*" files = [ - {file = "sentry-sdk-1.13.0.tar.gz", hash = "sha256:72da0766c3069a3941eadbdfa0996f83f5a33e55902a19ba399557cfee1dddcc"}, - {file = "sentry_sdk-1.13.0-py2.py3-none-any.whl", hash = "sha256:b7ff6318183e551145b5c4766eb65b59ad5b63ff234dffddc5fb50340cad6729"}, + {file = "sentry-sdk-1.15.0.tar.gz", hash = "sha256:69ecbb2e1ff4db02a06c4f20f6f69cb5dfe3ebfbc06d023e40d77cf78e9c37e7"}, + {file = "sentry_sdk-1.15.0-py2.py3-none-any.whl", hash = "sha256:7ad4d37dd093f4a7cb5ad804c6efe9e8fab8873f7ffc06042dc3f3fd700a93ec"}, ] [package.dependencies] @@ -2051,7 +2051,8 @@ falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)"] httpx = ["httpx (>=0.16.0)"] -opentelemetry = ["opentelemetry-distro (>=0.350b0)"] +huey = ["huey (>=2)"] +opentelemetry = ["opentelemetry-distro (>=0.35b0)"] pure-eval = ["asttokens", "executing", "pure-eval"] pymongo = ["pymongo (>=3.1)"] pyspark = ["pyspark (>=2.4.4)"] From c10e13125057e506381d1be8c2ec1394eee45d62 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 13 Feb 2023 11:49:20 +0000 Subject: [PATCH 181/344] Apply logging from hotfixes branch to develop (#15054) * Apply logging from hotfixes branch to develop Part of #4826. Originally added in #11882. * Changelog --- changelog.d/15054.misc | 1 + synapse/rest/client/account.py | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/15054.misc diff --git a/changelog.d/15054.misc b/changelog.d/15054.misc new file mode 100644 index 000000000000..d800b107cf14 --- /dev/null +++ b/changelog.d/15054.misc @@ -0,0 +1 @@ +Merge debug logging from the hotfixes branch. 
diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 4373c7366262..232f3a976d76 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -415,6 +415,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: request, MsisdnRequestTokenBody ) msisdn = phone_number_to_msisdn(body.country, body.phone_number) + logger.info("Request #%s to verify ownership of %s", body.send_attempt, msisdn) if not await check_3pid_allowed(self.hs, "msisdn", msisdn): raise SynapseError( @@ -444,6 +445,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await self.hs.get_clock().sleep(random.randint(1, 10) / 10) return 200, {"sid": random_string(16)} + logger.info("MSISDN %s is already in use by %s", msisdn, existing_user_id) raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE) if not self.hs.config.registration.account_threepid_delegate_msisdn: @@ -468,6 +470,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: threepid_send_requests.labels(type="msisdn", reason="add_threepid").observe( body.send_attempt ) + logger.info("MSISDN %s: got response from identity server: %s", msisdn, ret) return 200, ret From bdccfd24773d7482ae497263634312640dab01d1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 13 Feb 2023 12:12:48 +0000 Subject: [PATCH 182/344] Refactor arguments of `try_unbind_threepid(_with_id_server)` from dict to separate args (#15053) --- changelog.d/15053.misc | 1 + synapse/handlers/auth.py | 5 ++- synapse/handlers/deactivate_account.py | 7 +--- synapse/handlers/identity.py | 47 +++++++++++++------------- synapse/rest/client/account.py | 7 +--- 5 files changed, 28 insertions(+), 39 deletions(-) create mode 100644 changelog.d/15053.misc diff --git a/changelog.d/15053.misc b/changelog.d/15053.misc new file mode 100644 index 000000000000..c27528f5c6e5 --- /dev/null +++ b/changelog.d/15053.misc @@ -0,0 +1 @@ +Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. 
\ No newline at end of file diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 30f2d46c3c4a..57a6854b1e08 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1593,9 +1593,8 @@ async def delete_threepid( if medium == "email": address = canonicalise_email(address) - identity_handler = self.hs.get_identity_handler() - result = await identity_handler.try_unbind_threepid( - user_id, {"medium": medium, "address": address, "id_server": id_server} + result = await self.hs.get_identity_handler().try_unbind_threepid( + user_id, medium, address, id_server ) await self.store.user_delete_threepid(user_id, medium, address) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index d74d135c0c50..d24f64938219 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -106,12 +106,7 @@ async def deactivate_account( for threepid in threepids: try: result = await self._identity_handler.try_unbind_threepid( - user_id, - { - "medium": threepid["medium"], - "address": threepid["address"], - "id_server": id_server, - }, + user_id, threepid["medium"], threepid["address"], id_server ) identity_server_supports_unbinding &= result except Exception: diff --git a/synapse/handlers/identity.py b/synapse/handlers/identity.py index 848e46eb9ba6..bf0f7acf8076 100644 --- a/synapse/handlers/identity.py +++ b/synapse/handlers/identity.py @@ -219,28 +219,31 @@ async def bind_threepid( data = json_decoder.decode(e.msg) # XXX WAT? return data - async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool: - """Attempt to remove a 3PID from an identity server, or if one is not provided, all - identity servers we're aware the binding is present on + async def try_unbind_threepid( + self, mxid: str, medium: str, address: str, id_server: Optional[str] + ) -> bool: + """Attempt to remove a 3PID from one or more identity servers. Args: mxid: Matrix user ID of binding to be removed - threepid: Dict with medium & address of binding to be - removed, and an optional id_server. + medium: The medium of the third-party ID. + address: The address of the third-party ID. + id_server: An identity server to attempt to unbind from. If None, + attempt to remove the association from all identity servers + known to potentially have it. Raises: - SynapseError: If we failed to contact the identity server + SynapseError: If we failed to contact one or more identity servers. Returns: - True on success, otherwise False if the identity - server doesn't support unbinding (or no identity server found to - contact). + True on success, otherwise False if the identity server doesn't + support unbinding (or no identity server to contact was found). 
""" - if threepid.get("id_server"): - id_servers = [threepid["id_server"]] + if id_server: + id_servers = [id_server] else: id_servers = await self.store.get_id_servers_user_bound( - user_id=mxid, medium=threepid["medium"], address=threepid["address"] + mxid, medium, address ) # We don't know where to unbind, so we don't have a choice but to return @@ -249,20 +252,21 @@ async def try_unbind_threepid(self, mxid: str, threepid: dict) -> bool: changed = True for id_server in id_servers: - changed &= await self.try_unbind_threepid_with_id_server( - mxid, threepid, id_server + changed &= await self._try_unbind_threepid_with_id_server( + mxid, medium, address, id_server ) return changed - async def try_unbind_threepid_with_id_server( - self, mxid: str, threepid: dict, id_server: str + async def _try_unbind_threepid_with_id_server( + self, mxid: str, medium: str, address: str, id_server: str ) -> bool: """Removes a binding from an identity server Args: mxid: Matrix user ID of binding to be removed - threepid: Dict with medium & address of binding to be removed + medium: The medium of the third-party ID + address: The address of the third-party ID id_server: Identity server to unbind from Raises: @@ -286,7 +290,7 @@ async def try_unbind_threepid_with_id_server( content = { "mxid": mxid, - "threepid": {"medium": threepid["medium"], "address": threepid["address"]}, + "threepid": {"medium": medium, "address": address}, } # we abuse the federation http client to sign the request, but we have to send it @@ -319,12 +323,7 @@ async def try_unbind_threepid_with_id_server( except RequestTimedOutError: raise SynapseError(500, "Timed out contacting identity server") - await self.store.remove_user_bound_threepid( - user_id=mxid, - medium=threepid["medium"], - address=threepid["address"], - id_server=id_server, - ) + await self.store.remove_user_bound_threepid(mxid, medium, address, id_server) return changed diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 232f3a976d76..662f5bf7621b 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -737,12 +737,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # Attempt to unbind the threepid from an identity server. If id_server is None, try to # unbind from all identity servers this threepid has been added to in the past result = await self.identity_handler.try_unbind_threepid( - requester.user.to_string(), - { - "address": body.address, - "medium": body.medium, - "id_server": body.id_server, - }, + requester.user.to_string(), body.medium, body.address, body.id_server ) return 200, {"id_server_unbind_result": "success" if result else "no-support"} From bac123c9d3d2c614c7394d5af371567334939307 Mon Sep 17 00:00:00 2001 From: V02460 Date: Mon, 13 Feb 2023 13:25:56 +0100 Subject: [PATCH 183/344] Update pyo3-log to v0.8.1 (#15043) Signed-off-by: Kai A. 
Hiller --- Cargo.lock | 4 ++-- changelog.d/15043.misc | 1 + rust/Cargo.toml | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15043.misc diff --git a/Cargo.lock b/Cargo.lock index d44191ff3835..1bf76cb863ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5695ccff5060c13ca1751cf8c857a12da9b0bf0378cb071c5e0326f7c7e4c1b" +checksum = "f9c8b57fe71fb5dcf38970ebedc2b1531cf1c14b1b9b4c560a182a57e115575c" dependencies = [ "arc-swap", "log", diff --git a/changelog.d/15043.misc b/changelog.d/15043.misc new file mode 100644 index 000000000000..cb1839412317 --- /dev/null +++ b/changelog.d/15043.misc @@ -0,0 +1 @@ +Update pyo3-log to v0.8.1. diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 09e2bba5e5c2..533a8cc6776f 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -24,7 +24,7 @@ anyhow = "1.0.63" lazy_static = "1.4.0" log = "0.4.17" pyo3 = { version = "0.17.1", features = ["macros", "anyhow", "abi3", "abi3-py37"] } -pyo3-log = "0.7.0" +pyo3-log = "0.8.1" pythonize = "0.17.0" regex = "1.6.0" serde = { version = "1.0.144", features = ["derive"] } From 3d7aead5d62e6da97e006199b3f957325e54b053 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 13 Feb 2023 16:30:58 +0000 Subject: [PATCH 184/344] Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. (#14834) --- changelog.d/14834.misc | 1 + synapse/handlers/room_summary.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/14834.misc diff --git a/changelog.d/14834.misc b/changelog.d/14834.misc new file mode 100644 index 000000000000..e683212dc463 --- /dev/null +++ b/changelog.d/14834.misc @@ -0,0 +1 @@ +Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. \ No newline at end of file diff --git a/synapse/handlers/room_summary.py b/synapse/handlers/room_summary.py index 4472019fbcd2..807245160db6 100644 --- a/synapse/handlers/room_summary.py +++ b/synapse/handlers/room_summary.py @@ -521,8 +521,8 @@ async def _is_local_room_accessible( It should return true if: - * The requester is joined or can join the room (per MSC3173). - * The origin server has any user that is joined or can join the room. + * The requesting user is joined or can join the room (per MSC3173); or + * The origin server has any user that is joined or can join the room; or * The history visibility is set to world readable. Args: From c0bf4c3cb43abb90945591f5c3edb6ac45be2afd Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 13 Feb 2023 18:15:38 +0000 Subject: [PATCH 185/344] Add check to ensure locked dependencies have source distributions available. 
(#14742) --- .github/workflows/poetry_lockfile.yaml | 24 ++++++++ changelog.d/14742.misc | 1 + poetry.lock | 2 +- pyproject.toml | 3 + scripts-dev/check_locked_deps_have_sdists.py | 58 ++++++++++++++++++++ 5 files changed, 87 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/poetry_lockfile.yaml create mode 100644 changelog.d/14742.misc create mode 100755 scripts-dev/check_locked_deps_have_sdists.py diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml new file mode 100644 index 000000000000..ae4d27f2de44 --- /dev/null +++ b/.github/workflows/poetry_lockfile.yaml @@ -0,0 +1,24 @@ +on: + push: + branches: ["develop", "release-*"] + paths: + - poetry.lock + pull_request: + paths: + - poetry.lock + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + check-sdists: + name: "Check locked dependencies have sdists" + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: '3.x' + - run: pip install tomli + - run: ./scripts-dev/check_locked_deps_have_sdists.py diff --git a/changelog.d/14742.misc b/changelog.d/14742.misc new file mode 100644 index 000000000000..c0b5d2c0620e --- /dev/null +++ b/changelog.d/14742.misc @@ -0,0 +1 @@ +Add check to ensure locked dependencies have source distributions available. \ No newline at end of file diff --git a/poetry.lock b/poetry.lock index 688fd2ac26f9..e534b30d2b21 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3027,4 +3027,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "16528ddab686d1bc3180ff37b09de35b904f68516cfdcc3942844163a126255e" +content-hash = "95cb043fa56e1e3275ba7f74b68b2191bd5886eea3e06b8cd370d7fc9fea3c07" diff --git a/pyproject.toml b/pyproject.toml index 21621510c8bf..880f2d3bd83e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -346,6 +346,9 @@ twine = "*" # Towncrier min version comes from #3425. Rationale unclear. towncrier = ">=18.6.0rc1" +# Used for checking the Poetry lockfile +tomli = ">=1.2.3" + [build-system] # The upper bounds here are defensive, intended to prevent situations like # #13849 and #14079 where we see buildtime or runtime errors caused by build diff --git a/scripts-dev/check_locked_deps_have_sdists.py b/scripts-dev/check_locked_deps_have_sdists.py new file mode 100755 index 000000000000..63ad99280a0d --- /dev/null +++ b/scripts-dev/check_locked_deps_have_sdists.py @@ -0,0 +1,58 @@ +#! /usr/bin/env python +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +from pathlib import Path +from typing import Dict, List + +import tomli + + +def main() -> None: + lockfile_path = Path(__file__).parent.parent.joinpath("poetry.lock") + with open(lockfile_path, "rb") as lockfile: + lockfile_content = tomli.load(lockfile) + + # Poetry 1.3+ lockfile format: + # There's a `files` inline table in each [[package]] + packages_to_assets: Dict[str, List[Dict[str, str]]] = { + package["name"]: package["files"] for package in lockfile_content["package"] + } + + success = True + + for package_name, assets in packages_to_assets.items(): + has_sdist = any(asset["file"].endswith(".tar.gz") for asset in assets) + if not has_sdist: + success = False + print( + f"Locked package {package_name!r} does not have a source distribution!", + file=sys.stderr, + ) + + if not success: + print( + "\nThere were some problems with the Poetry lockfile (poetry.lock).", + file=sys.stderr, + ) + sys.exit(1) + + print( + f"Poetry lockfile OK. {len(packages_to_assets)} locked packages checked.", + file=sys.stderr, + ) + + +if __name__ == "__main__": + main() From db2b105d69fa331bb3f050df82266314f61577ea Mon Sep 17 00:00:00 2001 From: Harishankar Kumar <31770598+hari01584@users.noreply.github.com> Date: Tue, 14 Feb 2023 15:07:08 +0530 Subject: [PATCH 186/344] Change collection[str] to StrCollection in event_auth code (#14929) Signed-off-by: Harishankar Kumar --- changelog.d/14929.misc | 1 + synapse/event_auth.py | 23 ++++++++----------- synapse/events/__init__.py | 6 ++--- synapse/storage/databases/main/events.py | 7 +++--- .../databases/main/events_bg_updates.py | 6 ++--- 5 files changed, 19 insertions(+), 24 deletions(-) create mode 100644 changelog.d/14929.misc diff --git a/changelog.d/14929.misc b/changelog.d/14929.misc new file mode 100644 index 000000000000..2cc3614dfded --- /dev/null +++ b/changelog.d/14929.misc @@ -0,0 +1 @@ +Use `StrCollection` to avoid potential bugs with `Collection[str]`. 
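A minimal sketch of the bug class that `StrCollection` is meant to rule out (the helper names and the simplified alias below are illustrative, not taken from the Synapse source): because a plain `str` is itself a `Collection[str]`, passing a single string where a collection of strings is expected still type-checks, but silently iterates character by character.

    from typing import AbstractSet, Collection, List, Tuple, Union

    def fetch_events(event_ids: Collection[str]) -> List[str]:
        # Illustrative helper only: pretend this looks up each event ID.
        return [f"fetched {event_id}" for event_id in event_ids]

    # A plain str is itself a Collection[str], so this call passes mypy but
    # iterates over single characters rather than whole event IDs.
    print(fetch_events("$ev1:example.org"))

    # A StrCollection-style alias (roughly: a list, tuple or set of str, but
    # never a bare str) turns the same mistake into a type error.
    StrCollection = Union[Tuple[str, ...], List[str], AbstractSet[str]]

    def fetch_events_strict(event_ids: StrCollection) -> List[str]:
        return [f"fetched {event_id}" for event_id in event_ids]

    print(fetch_events_strict(["$ev1:example.org", "$ev2:example.org"]))  # OK
    # fetch_events_strict("$ev1:example.org")  # rejected by a type checker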
diff --git a/synapse/event_auth.py b/synapse/event_auth.py index e0be9f88cc9d..4d6d1b8ebd58 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -16,18 +16,7 @@ import collections.abc import logging import typing -from typing import ( - Any, - Collection, - Dict, - Iterable, - List, - Mapping, - Optional, - Set, - Tuple, - Union, -) +from typing import Any, Dict, Iterable, List, Mapping, Optional, Set, Tuple, Union from canonicaljson import encode_canonical_json from signedjson.key import decode_verify_key_bytes @@ -56,7 +45,13 @@ RoomVersions, ) from synapse.storage.databases.main.events_worker import EventRedactBehaviour -from synapse.types import MutableStateMap, StateMap, UserID, get_domain_from_id +from synapse.types import ( + MutableStateMap, + StateMap, + StrCollection, + UserID, + get_domain_from_id, +) if typing.TYPE_CHECKING: # conditional imports to avoid import cycle @@ -69,7 +64,7 @@ class _EventSourceStore(Protocol): async def get_events( self, - event_ids: Collection[str], + event_ids: StrCollection, redact_behaviour: EventRedactBehaviour, get_prev_content: bool = False, allow_rejected: bool = False, diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 8aca9a3ab9e9..91118a8d847f 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -39,7 +39,7 @@ from synapse.api.constants import RelationTypes from synapse.api.room_versions import EventFormatVersions, RoomVersion, RoomVersions -from synapse.types import JsonDict, RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken, StrCollection from synapse.util.caches import intern_dict from synapse.util.frozenutils import freeze from synapse.util.stringutils import strtobool @@ -413,7 +413,7 @@ def prev_event_ids(self) -> Sequence[str]: """ return [e for e, _ in self._dict["prev_events"]] - def auth_event_ids(self) -> Sequence[str]: + def auth_event_ids(self) -> StrCollection: """Returns the list of auth event IDs. The order matches the order specified in the event, though there is no meaning to it. @@ -558,7 +558,7 @@ def prev_event_ids(self) -> Sequence[str]: """ return self._dict["prev_events"] - def auth_event_ids(self) -> Sequence[str]: + def auth_event_ids(self) -> StrCollection: """Returns the list of auth event IDs. The order matches the order specified in the event, though there is no meaning to it. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index ffe766fd5646..7996cbb557d0 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -25,7 +25,6 @@ Iterable, List, Optional, - Sequence, Set, Tuple, ) @@ -51,7 +50,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import AbstractStreamIdGenerator from synapse.storage.util.sequence import SequenceGenerator -from synapse.types import JsonDict, StateMap, get_domain_from_id +from synapse.types import JsonDict, StateMap, StrCollection, get_domain_from_id from synapse.util import json_encoder from synapse.util.iterutils import batch_iter, sorted_topologically from synapse.util.stringutils import non_null_str_or_none @@ -552,7 +551,7 @@ def _add_chain_cover_index( event_chain_id_gen: SequenceGenerator, event_to_room_id: Dict[str, str], event_to_types: Dict[str, Tuple[str, str]], - event_to_auth_chain: Dict[str, Sequence[str]], + event_to_auth_chain: Dict[str, StrCollection], ) -> None: """Calculate the chain cover index for the given events. 
@@ -846,7 +845,7 @@ def _allocate_chain_ids( event_chain_id_gen: SequenceGenerator, event_to_room_id: Dict[str, str], event_to_types: Dict[str, Tuple[str, str]], - event_to_auth_chain: Dict[str, Sequence[str]], + event_to_auth_chain: Dict[str, StrCollection], events_to_calc_chain_id_for: Set[str], chain_map: Dict[str, Tuple[int, int]], ) -> Dict[str, Tuple[int, int]]: diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index b9d3c36d602c..584536111daa 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -13,7 +13,7 @@ # limitations under the License. import logging -from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Set, Tuple, cast +from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast import attr @@ -29,7 +29,7 @@ ) from synapse.storage.databases.main.events import PersistEventsStore from synapse.storage.types import Cursor -from synapse.types import JsonDict +from synapse.types import JsonDict, StrCollection if TYPE_CHECKING: from synapse.server import HomeServer @@ -1061,7 +1061,7 @@ def _calculate_chain_cover_txn( self.event_chain_id_gen, # type: ignore[attr-defined] event_to_room_id, event_to_types, - cast(Dict[str, Sequence[str]], event_to_auth_chain), + cast(Dict[str, StrCollection], event_to_auth_chain), ) return _CalculateChainCover( From f09db5c9918b6aaeb1f53ab4fac3a7f05f512c5f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Feb 2023 12:10:29 +0100 Subject: [PATCH 187/344] Skip calculating unread push actions in `/sync` when `enable_push` is false. (#14980) --- changelog.d/14980.misc | 1 + synapse/handlers/sync.py | 8 ++++++++ synapse/storage/databases/main/event_push_actions.py | 7 +++++++ 3 files changed, 16 insertions(+) create mode 100644 changelog.d/14980.misc diff --git a/changelog.d/14980.misc b/changelog.d/14980.misc new file mode 100644 index 000000000000..145f4a788b8e --- /dev/null +++ b/changelog.d/14980.misc @@ -0,0 +1 @@ +Skip calculating unread push actions in /sync when enable_push is false. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 4bae46158acf..3a9cddf15adc 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -269,6 +269,8 @@ def __init__(self, hs: "HomeServer"): self._state_storage_controller = self._storage_controllers.state self._device_handler = hs.get_device_handler() + self.should_calculate_push_rules = hs.config.push.enable_push + # TODO: flush cache entries on subsequent sync request. # Once we get the next /sync request (ie, one with the same access token # that sets 'since' to 'next_batch'), we know that device won't need a @@ -1288,6 +1290,12 @@ async def _find_missing_partial_state_memberships( async def unread_notifs_for_room_id( self, room_id: str, sync_config: SyncConfig ) -> RoomNotifCounts: + if not self.should_calculate_push_rules: + # If push rules have been universally disabled then we know we won't + # have any unread counts in the DB, so we may as well skip asking + # the DB. 
+ return RoomNotifCounts.empty() + with Measure(self.clock, "unread_notifs_for_room_id"): return await self.store.get_unread_event_push_actions_by_room_for_user( diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 3a0c370fde1d..eeccf5db2433 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -203,11 +203,18 @@ class RoomNotifCounts: # Map of thread ID to the notification counts. threads: Dict[str, NotifCounts] + @staticmethod + def empty() -> "RoomNotifCounts": + return _EMPTY_ROOM_NOTIF_COUNTS + def __len__(self) -> int: # To properly account for the amount of space in any caches. return len(self.threads) + 1 +_EMPTY_ROOM_NOTIF_COUNTS = RoomNotifCounts(NotifCounts(), {}) + + def _serialize_action( actions: Collection[Union[Mapping, str]], is_highlight: bool ) -> str: From cb262713b701d1abcbca03334d17e2d0f81eee4a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 14 Feb 2023 12:20:25 +0100 Subject: [PATCH 188/344] Fix clashing DB txn name (#15070) * Fix clashing DB txn name * Newsfile --- changelog.d/15070.misc | 1 + synapse/storage/databases/main/end_to_end_keys.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15070.misc diff --git a/changelog.d/15070.misc b/changelog.d/15070.misc new file mode 100644 index 000000000000..0f3244de9fa3 --- /dev/null +++ b/changelog.d/15070.misc @@ -0,0 +1 @@ +Fix clashing database transaction name. diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 752dc16e1773..2c2d1456666b 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -262,7 +262,7 @@ async def get_e2e_device_keys_and_signatures( for batch in batch_iter(signature_query, 50): cross_sigs_result = await self.db_pool.runInteraction( - "get_e2e_cross_signing_signatures", + "get_e2e_cross_signing_signatures_for_devices", self._get_e2e_cross_signing_signatures_for_devices_txn, batch, ) From 3e90dfdd81c2c6dcaf1f21f24fbca0a4c820b150 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 14 Feb 2023 12:59:15 +0100 Subject: [PATCH 189/344] 1.77.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index a2cb957f1695..a62bd4eb28a3 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.77.0 (2023-02-14) +=========================== + +No significant changes since 1.77.0rc2. + + Synapse 1.77.0rc2 (2023-02-10) ============================== diff --git a/debian/changelog b/debian/changelog index 461953742bac..ea651438f1dc 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.77.0) stable; urgency=medium + + * New Synapse release 1.77.0. + + -- Synapse Packaging team Tue, 14 Feb 2023 12:59:02 +0100 + matrix-synapse-py3 (1.77.0~rc2) stable; urgency=medium * New Synapse release 1.77.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index 921a1fccbc94..59804af1f56f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.77.0rc2" +version = "1.77.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 463c19ac3648b242c480e299349d2ef90bf38a0b Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Tue, 14 Feb 2023 12:32:19 +0000 Subject: [PATCH 190/344] Faster joins: Omit device list updates from partial state rooms in /sync (#15069) ...when lazy loading of members is not enabled. It's weird to notify a client that another user's device list has changed when the client doesn't think that they share a room. Note that when a room is un-partial stated, device list updates are emitted for every member in that room over /sync. Signed-off-by: Sean Quah --- changelog.d/15069.misc | 1 + synapse/handlers/sync.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/15069.misc diff --git a/changelog.d/15069.misc b/changelog.d/15069.misc new file mode 100644 index 000000000000..e7a619ad2b5b --- /dev/null +++ b/changelog.d/15069.misc @@ -0,0 +1 @@ +Faster joins: omit device list updates originating from partial state rooms in /sync responses without lazy loading of members enabled. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 3a9cddf15adc..4e4595312c4d 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1399,6 +1399,11 @@ async def generate_sync_result( for room_id, is_partial_state in results.items() if is_partial_state ) + membership_change_events = [ + event + for event in membership_change_events + if not results.get(event.room_id, False) + ] # Incremental eager syncs should additionally include rooms that # - we are joined to From e9b1ff9f31f8ff093e7eaf9c54fa8f40a3b66aa8 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 14 Feb 2023 15:50:59 +0000 Subject: [PATCH 191/344] Prevent clients from reporting nonexistent events. (#13779) --- changelog.d/13779.bugfix | 1 + synapse/rest/client/report_event.py | 11 ++++++++++- tests/rest/client/test_report_event.py | 12 ++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) create mode 100644 changelog.d/13779.bugfix diff --git a/changelog.d/13779.bugfix b/changelog.d/13779.bugfix new file mode 100644 index 000000000000..a92c722c6ea6 --- /dev/null +++ b/changelog.d/13779.bugfix @@ -0,0 +1 @@ +Prevent clients from reporting nonexistent events. 
\ No newline at end of file diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py index e2b410cf32a3..9be58602210b 100644 --- a/synapse/rest/client/report_event.py +++ b/synapse/rest/client/report_event.py @@ -16,7 +16,7 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Tuple -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -39,6 +39,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.clock = hs.get_clock() self.store = hs.get_datastores().main + self._event_handler = self.hs.get_event_handler() async def on_POST( self, request: SynapseRequest, room_id: str, event_id: str @@ -61,6 +62,14 @@ async def on_POST( Codes.BAD_JSON, ) + event = await self._event_handler.get_event( + requester.user, room_id, event_id, show_redacted=False + ) + if event is None: + raise NotFoundError( + "Unable to report event: it does not exist or you aren't able to see it." + ) + await self.store.add_event_report( room_id=room_id, event_id=event_id, diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index 7cb1017a4a44..1250685d39ce 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -73,6 +73,18 @@ def test_reason_and_score_null(self) -> None: data = {"reason": None, "score": None} self._assert_status(400, data) + def test_cannot_report_nonexistent_event(self) -> None: + """ + Tests that we don't accept event reports for events which do not exist. + """ + channel = self.make_request( + "POST", + f"rooms/{self.room_id}/report/$nonsenseeventid:test", + {"reason": "i am very sad"}, + access_token=self.other_user_tok, + ) + self.assertEqual(404, channel.code, msg=channel.result["body"]) + def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( "POST", self.report_path, data, access_token=self.other_user_tok From 157c571f3e9d3d09cd763405b6a9eb967f2807e7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 14 Feb 2023 18:19:58 +0000 Subject: [PATCH 192/344] Remove spurious `dont_notify` action from `.m.rule.reaction` (#15073) This does nothing and I want to remove it from the MSC. --- changelog.d/15073.feature | 1 + rust/src/push/base_rules.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15073.feature diff --git a/changelog.d/15073.feature b/changelog.d/15073.feature new file mode 100644 index 000000000000..2889e3444f5d --- /dev/null +++ b/changelog.d/15073.feature @@ -0,0 +1 @@ +Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. 
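Sketched from the client's point of view, the effect of the one-line Rust change below is roughly the following; the dict layout approximates what the push rules API returns for the default reaction rule and is not a verbatim spec excerpt.

    # Rough sketch of the default ".m.rule.reaction" override rule as a client
    # might see it, with only the relevant fields shown.
    reaction_rule_before = {
        "rule_id": ".m.rule.reaction",
        "default": True,
        "enabled": True,
        "conditions": [
            {"kind": "event_match", "key": "type", "pattern": "m.reaction"},
        ],
        "actions": ["dont_notify"],
    }

    # After this patch the rule carries no actions at all: as the commit message
    # notes, the dont_notify action did nothing, and an empty actions list
    # already means "take no action" for a matching event.
    reaction_rule_after = {**reaction_rule_before, "actions": []}
    print(reaction_rule_after["actions"])  # []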
diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 97d0a0a7e220..dcbca340fece 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -223,7 +223,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pattern_type: None, }, ))]), - actions: Cow::Borrowed(&[Action::DontNotify]), + actions: Cow::Borrowed(&[]), default: true, default_enabled: true, }, From 119e0795a58548fb38fab299e7c362fcbb388d68 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 14 Feb 2023 14:02:19 -0500 Subject: [PATCH 193/344] Implement MSC3966: Add a push rule condition to search for a value in an array. (#15045) The `exact_event_property_contains` condition can be used to search for a value inside of an array. --- changelog.d/15045.feature | 1 + rust/benches/evaluator.rs | 32 +++++++----- rust/src/push/evaluator.rs | 65 +++++++++++++++++++----- rust/src/push/mod.rs | 33 +++++++++++- stubs/synapse/synapse_rust/push.pyi | 7 +-- synapse/config/experimental.py | 5 ++ synapse/push/bulk_push_rule_evaluator.py | 21 +++++--- synapse/types/__init__.py | 1 + tests/push/test_push_rule_evaluator.py | 53 +++++++++++++++++-- 9 files changed, 176 insertions(+), 42 deletions(-) create mode 100644 changelog.d/15045.feature diff --git a/changelog.d/15045.feature b/changelog.d/15045.feature new file mode 100644 index 000000000000..87766befdab8 --- /dev/null +++ b/changelog.d/15045.feature @@ -0,0 +1 @@ +Experimental support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): the `exact_event_property_contains` push rule condition. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 229553ebf8fe..8213dfd9eab6 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -15,8 +15,8 @@ #![feature(test)] use std::collections::BTreeSet; use synapse::push::{ - evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, PushRules, - SimpleJsonValue, + evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue, + PushRules, SimpleJsonValue, }; use test::Bencher; @@ -27,15 +27,15 @@ fn bench_match_exact(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - SimpleJsonValue::Str("m.text".to_string()), + JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), ), ( "room_id".to_string(), - SimpleJsonValue::Str("!room:server".to_string()), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), ), ( "content.body".to_string(), - SimpleJsonValue::Str("test message".to_string()), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), ), ] .into_iter() @@ -54,6 +54,7 @@ fn bench_match_exact(b: &mut Bencher) { vec![], false, false, + false, ) .unwrap(); @@ -76,15 +77,15 @@ fn bench_match_word(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - SimpleJsonValue::Str("m.text".to_string()), + JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), ), ( "room_id".to_string(), - SimpleJsonValue::Str("!room:server".to_string()), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), ), ( "content.body".to_string(), - SimpleJsonValue::Str("test message".to_string()), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), ), ] .into_iter() @@ -103,6 +104,7 @@ fn bench_match_word(b: &mut Bencher) { vec![], false, false, + false, ) .unwrap(); @@ -125,15 +127,15 @@ fn bench_match_word_miss(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - SimpleJsonValue::Str("m.text".to_string()), + 
JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), ), ( "room_id".to_string(), - SimpleJsonValue::Str("!room:server".to_string()), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), ), ( "content.body".to_string(), - SimpleJsonValue::Str("test message".to_string()), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), ), ] .into_iter() @@ -152,6 +154,7 @@ fn bench_match_word_miss(b: &mut Bencher) { vec![], false, false, + false, ) .unwrap(); @@ -174,15 +177,15 @@ fn bench_eval_message(b: &mut Bencher) { let flattened_keys = [ ( "type".to_string(), - SimpleJsonValue::Str("m.text".to_string()), + JsonValue::Value(SimpleJsonValue::Str("m.text".to_string())), ), ( "room_id".to_string(), - SimpleJsonValue::Str("!room:server".to_string()), + JsonValue::Value(SimpleJsonValue::Str("!room:server".to_string())), ), ( "content.body".to_string(), - SimpleJsonValue::Str("test message".to_string()), + JsonValue::Value(SimpleJsonValue::Str("test message".to_string())), ), ] .into_iter() @@ -201,6 +204,7 @@ fn bench_eval_message(b: &mut Bencher) { vec![], false, false, + false, ) .unwrap(); diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index dd6b4343ec2c..2eaa06ad7614 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -14,6 +14,7 @@ use std::collections::{BTreeMap, BTreeSet}; +use crate::push::JsonValue; use anyhow::{Context, Error}; use lazy_static::lazy_static; use log::warn; @@ -63,7 +64,7 @@ impl RoomVersionFeatures { pub struct PushRuleEvaluator { /// A mapping of "flattened" keys to simple JSON values in the event, e.g. /// includes things like "type" and "content.msgtype". - flattened_keys: BTreeMap, + flattened_keys: BTreeMap, /// The "content.body", if any. body: String, @@ -87,7 +88,7 @@ pub struct PushRuleEvaluator { /// The related events, indexed by relation type. Flattened in the same manner as /// `flattened_keys`. - related_events_flattened: BTreeMap>, + related_events_flattened: BTreeMap>, /// If msc3664, push rules for related events, is enabled. related_event_match_enabled: bool, @@ -101,6 +102,9 @@ pub struct PushRuleEvaluator { /// If MSC3758 (exact_event_match push rule condition) is enabled. msc3758_exact_event_match: bool, + + /// If MSC3966 (exact_event_property_contains push rule condition) is enabled. + msc3966_exact_event_property_contains: bool, } #[pymethods] @@ -109,21 +113,22 @@ impl PushRuleEvaluator { #[allow(clippy::too_many_arguments)] #[new] pub fn py_new( - flattened_keys: BTreeMap, + flattened_keys: BTreeMap, has_mentions: bool, user_mentions: BTreeSet, room_mention: bool, room_member_count: u64, sender_power_level: Option, notification_power_levels: BTreeMap, - related_events_flattened: BTreeMap>, + related_events_flattened: BTreeMap>, related_event_match_enabled: bool, room_version_feature_flags: Vec, msc3931_enabled: bool, msc3758_exact_event_match: bool, + msc3966_exact_event_property_contains: bool, ) -> Result { let body = match flattened_keys.get("content.body") { - Some(SimpleJsonValue::Str(s)) => s.clone(), + Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(), _ => String::new(), }; @@ -141,6 +146,7 @@ impl PushRuleEvaluator { room_version_feature_flags, msc3931_enabled, msc3758_exact_event_match, + msc3966_exact_event_property_contains, }) } @@ -263,6 +269,9 @@ impl PushRuleEvaluator { KnownCondition::RelatedEventMatch(event_match) => { self.match_related_event_match(event_match, user_id)? 
} + KnownCondition::ExactEventPropertyContains(exact_event_match) => { + self.match_exact_event_property_contains(exact_event_match)? + } KnownCondition::IsUserMention => { if let Some(uid) = user_id { self.user_mentions.contains(uid) @@ -345,7 +354,7 @@ impl PushRuleEvaluator { return Ok(false); }; - let haystack = if let Some(SimpleJsonValue::Str(haystack)) = + let haystack = if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = self.flattened_keys.get(&*event_match.key) { haystack @@ -377,7 +386,9 @@ impl PushRuleEvaluator { let value = &exact_event_match.value; - let haystack = if let Some(haystack) = self.flattened_keys.get(&*exact_event_match.key) { + let haystack = if let Some(JsonValue::Value(haystack)) = + self.flattened_keys.get(&*exact_event_match.key) + { haystack } else { return Ok(false); @@ -441,11 +452,12 @@ impl PushRuleEvaluator { return Ok(false); }; - let haystack = if let Some(SimpleJsonValue::Str(haystack)) = event.get(&**key) { - haystack - } else { - return Ok(false); - }; + let haystack = + if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = event.get(&**key) { + haystack + } else { + return Ok(false); + }; // For the content.body we match against "words", but for everything // else we match against the entire value. @@ -459,6 +471,29 @@ impl PushRuleEvaluator { compiled_pattern.is_match(haystack) } + /// Evaluates a `exact_event_property_contains` condition. (MSC3758) + fn match_exact_event_property_contains( + &self, + exact_event_match: &ExactEventMatchCondition, + ) -> Result { + // First check if the feature is enabled. + if !self.msc3966_exact_event_property_contains { + return Ok(false); + } + + let value = &exact_event_match.value; + + let haystack = if let Some(JsonValue::Array(haystack)) = + self.flattened_keys.get(&*exact_event_match.key) + { + haystack + } else { + return Ok(false); + }; + + Ok(haystack.contains(&**value)) + } + /// Match the member count against an 'is' condition /// The `is` condition can be things like '>2', '==3' or even just '4'. 
fn match_member_count(&self, is: &str) -> Result { @@ -488,7 +523,7 @@ fn push_rule_evaluator() { let mut flattened_keys = BTreeMap::new(); flattened_keys.insert( "content.body".to_string(), - SimpleJsonValue::Str("foo bar bob hello".to_string()), + JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())), ); let evaluator = PushRuleEvaluator::py_new( flattened_keys, @@ -503,6 +538,7 @@ fn push_rule_evaluator() { vec![], true, true, + true, ) .unwrap(); @@ -519,7 +555,7 @@ fn test_requires_room_version_supports_condition() { let mut flattened_keys = BTreeMap::new(); flattened_keys.insert( "content.body".to_string(), - SimpleJsonValue::Str("foo bar bob hello".to_string()), + JsonValue::Value(SimpleJsonValue::Str("foo bar bob hello".to_string())), ); let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; let evaluator = PushRuleEvaluator::py_new( @@ -535,6 +571,7 @@ fn test_requires_room_version_supports_condition() { flags, true, true, + true, ) .unwrap(); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 79e519fe111d..253b5f367c67 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -58,7 +58,7 @@ use anyhow::{Context, Error}; use log::warn; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; -use pyo3::types::{PyBool, PyLong, PyString}; +use pyo3::types::{PyBool, PyList, PyLong, PyString}; use pythonize::{depythonize, pythonize}; use serde::de::Error as _; use serde::{Deserialize, Serialize}; @@ -280,6 +280,35 @@ impl<'source> FromPyObject<'source> for SimpleJsonValue { } } +/// A JSON values (list, string, int, boolean, or null). +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(untagged)] +pub enum JsonValue { + Array(Vec), + Value(SimpleJsonValue), +} + +impl<'source> FromPyObject<'source> for JsonValue { + fn extract(ob: &'source PyAny) -> PyResult { + if let Ok(l) = ::try_from(ob) { + match l.iter().map(SimpleJsonValue::extract).collect() { + Ok(a) => Ok(JsonValue::Array(a)), + Err(e) => Err(PyTypeError::new_err(format!( + "Can't convert to JsonValue::Array: {}", + e + ))), + } + } else if let Ok(v) = SimpleJsonValue::extract(ob) { + Ok(JsonValue::Value(v)) + } else { + Err(PyTypeError::new_err(format!( + "Can't convert from {} to JsonValue", + ob.get_type().name()? + ))) + } + } +} + /// A condition used in push rules to match against an event. /// /// We need this split as `serde` doesn't give us the ability to have a @@ -303,6 +332,8 @@ pub enum KnownCondition { ExactEventMatch(ExactEventMatchCondition), #[serde(rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatch(RelatedEventMatchCondition), + #[serde(rename = "org.matrix.msc3966.exact_event_property_contains")] + ExactEventPropertyContains(ExactEventMatchCondition), #[serde(rename = "org.matrix.msc3952.is_user_mention")] IsUserMention, #[serde(rename = "org.matrix.msc3952.is_room_mention")] diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 328f681a29fc..7b33c30cc919 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -14,7 +14,7 @@ from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union -from synapse.types import JsonDict, SimpleJsonValue +from synapse.types import JsonDict, JsonValue class PushRule: @property @@ -56,18 +56,19 @@ def get_base_rule_ids() -> Collection[str]: ... 
class PushRuleEvaluator: def __init__( self, - flattened_keys: Mapping[str, SimpleJsonValue], + flattened_keys: Mapping[str, JsonValue], has_mentions: bool, user_mentions: Set[str], room_mention: bool, room_member_count: int, sender_power_level: Optional[int], notification_power_levels: Mapping[str, int], - related_events_flattened: Mapping[str, Mapping[str, SimpleJsonValue]], + related_events_flattened: Mapping[str, Mapping[str, JsonValue]], related_event_match_enabled: bool, room_version_feature_flags: Tuple[str, ...], msc3931_enabled: bool, msc3758_exact_event_match: bool, + msc3966_exact_event_property_contains: bool, ): ... def run( self, diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 6ac2f0c10d52..1d294f879866 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -188,3 +188,8 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.msc3958_supress_edit_notifs = experimental.get( "msc3958_supress_edit_notifs", False ) + + # MSC3966: exact_event_property_contains push rule condition. + self.msc3966_exact_event_property_contains = experimental.get( + "msc3966_exact_event_property_contains", False + ) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index f6a5bffb0f6b..2e917c90c4ae 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -44,7 +44,7 @@ from synapse.state import POWER_KEY from synapse.storage.databases.main.roommember import EventIdMembership from synapse.synapse_rust.push import FilteredPushRules, PushRuleEvaluator -from synapse.types import SimpleJsonValue +from synapse.types import JsonValue from synapse.types.state import StateFilter from synapse.util.caches import register_cache from synapse.util.metrics import measure_func @@ -259,13 +259,13 @@ async def _get_power_levels_and_sender_level( async def _related_events( self, event: EventBase - ) -> Dict[str, Dict[str, SimpleJsonValue]]: + ) -> Dict[str, Dict[str, JsonValue]]: """Fetches the related events for 'event'. Sets the im.vector.is_falling_back key if the event is from a fallback relation Returns: Mapping of relation type to flattened events. """ - related_events: Dict[str, Dict[str, SimpleJsonValue]] = {} + related_events: Dict[str, Dict[str, JsonValue]] = {} if self._related_event_match_enabled: related_event_id = event.content.get("m.relates_to", {}).get("event_id") relation_type = event.content.get("m.relates_to", {}).get("rel_type") @@ -429,6 +429,7 @@ async def _action_for_event_by_user( event.room_version.msc3931_push_features, self.hs.config.experimental.msc1767_enabled, # MSC3931 flag self.hs.config.experimental.msc3758_exact_event_match, + self.hs.config.experimental.msc3966_exact_event_property_contains, ) users = rules_by_user.keys() @@ -502,18 +503,22 @@ async def _action_for_event_by_user( StateGroup = Union[object, int] +def _is_simple_value(value: Any) -> bool: + return isinstance(value, (bool, str)) or type(value) is int or value is None + + def _flatten_dict( d: Union[EventBase, Mapping[str, Any]], prefix: Optional[List[str]] = None, - result: Optional[Dict[str, SimpleJsonValue]] = None, + result: Optional[Dict[str, JsonValue]] = None, *, msc3783_escape_event_match_key: bool = False, -) -> Dict[str, SimpleJsonValue]: +) -> Dict[str, JsonValue]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, flatten it into a single layer dictionary by combining the keys & sub-keys. 
- String, integer, boolean, and null values are kept. All others are dropped. + String, integer, boolean, null or lists of those values are kept. All others are dropped. Transforms: @@ -542,8 +547,10 @@ def _flatten_dict( # nested fields. key = key.replace("\\", "\\\\").replace(".", "\\.") - if isinstance(value, (bool, str)) or type(value) is int or value is None: + if _is_simple_value(value): result[".".join(prefix + [key])] = value + elif isinstance(value, (list, tuple)): + result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)] elif isinstance(value, Mapping): # do not set `room_version` due to recursion considerations below _flatten_dict( diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 52e366c8ae71..33363867c456 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -71,6 +71,7 @@ # JSON types. These could be made stronger, but will do for now. # A "simple" (canonical) JSON value. SimpleJsonValue = Optional[Union[str, int, bool]] +JsonValue = Union[List[SimpleJsonValue], Tuple[SimpleJsonValue, ...], SimpleJsonValue] # A JSON-serialisable dict. JsonDict = Dict[str, Any] # A JSON-serialisable mapping; roughly speaking an immutable JSONDict. diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 660344734175..0554d247bce5 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -32,6 +32,7 @@ from synapse.synapse_rust.push import PushRuleEvaluator from synapse.types import JsonDict, JsonMapping, UserID from synapse.util import Clock +from synapse.util.frozenutils import freeze from tests import unittest from tests.test_utils.event_injection import create_event, inject_member_event @@ -57,17 +58,24 @@ def test_nested(self) -> None: ) def test_non_string(self) -> None: - """Booleans, ints, and nulls should be kept while other items are dropped.""" + """String, booleans, ints, nulls and list of those should be kept while other items are dropped.""" input: Dict[str, Any] = { "woo": "woo", "foo": True, "bar": 1, "baz": None, - "fuzz": [], + "fuzz": ["woo", True, 1, None, [], {}], "boo": {}, } self.assertEqual( - {"woo": "woo", "foo": True, "bar": 1, "baz": None}, _flatten_dict(input) + { + "woo": "woo", + "foo": True, + "bar": 1, + "baz": None, + "fuzz": ["woo", True, 1, None], + }, + _flatten_dict(input), ) def test_event(self) -> None: @@ -117,6 +125,7 @@ def test_extensible_events(self) -> None: "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", + "content.org.matrix.msc1767.markup": [], } self.assertEqual(expected, _flatten_dict(event)) @@ -128,6 +137,7 @@ def test_extensible_events(self) -> None: "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", + "content.org.matrix.msc1767.markup": [], } self.assertEqual(expected, _flatten_dict(event)) @@ -169,6 +179,7 @@ def _get_evaluator( room_version_feature_flags=event.room_version.msc3931_push_features, msc3931_enabled=True, msc3758_exact_event_match=True, + msc3966_exact_event_property_contains=True, ) def test_display_name(self) -> None: @@ -549,6 +560,42 @@ def test_exact_event_match_integer(self) -> None: "incorrect types should not match", ) + def test_exact_event_property_contains(self) -> None: + """Check that exact_event_property_contains conditions work as expected.""" + + condition = { + "kind": "org.matrix.msc3966.exact_event_property_contains", + "key": "content.value", + "value": "foobaz", + } + self._assert_matches( + condition, + 
{"value": ["foobaz"]}, + "exact value should match", + ) + self._assert_matches( + condition, + {"value": ["foobaz", "bugz"]}, + "extra values should match", + ) + self._assert_not_matches( + condition, + {"value": ["FoobaZ"]}, + "values should match and be case-sensitive", + ) + self._assert_not_matches( + condition, + {"value": "foobaz"}, + "does not search in a string", + ) + + # it should work on frozendicts too + self._assert_matches( + condition, + freeze({"value": ["foobaz"]}), + "values should match on frozendicts", + ) + def test_no_body(self) -> None: """Not having a body shouldn't break the evaluator.""" evaluator = self._get_evaluator({}) From 42aea0d8af1556473b4f31f78d9facb448230a1f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 14 Feb 2023 14:03:35 -0500 Subject: [PATCH 194/344] Add final type hint to tests.unittest. (#15072) Adds a return type to HomeServerTestCase.make_homeserver and deal with any variables which are no longer Any. --- changelog.d/15072.misc | 1 + mypy.ini | 3 - tests/app/test_openid_listener.py | 8 ++- tests/crypto/test_keyring.py | 22 +++---- tests/events/test_presence_router.py | 12 ++-- tests/federation/test_complexity.py | 35 ++++++----- tests/federation/test_federation_catch_up.py | 32 +++++++--- tests/federation/test_federation_client.py | 4 +- tests/federation/test_federation_sender.py | 55 ++++++++--------- tests/handlers/test_appservice.py | 2 +- tests/handlers/test_cas.py | 8 +-- tests/handlers/test_e2e_keys.py | 55 +++++++++-------- tests/handlers/test_federation.py | 18 +++--- tests/handlers/test_federation_event.py | 6 +- tests/handlers/test_message.py | 11 ++-- tests/handlers/test_password_providers.py | 2 +- tests/handlers/test_register.py | 10 ++- tests/handlers/test_saml.py | 14 ++--- tests/handlers/test_typing.py | 12 ++-- tests/handlers/test_user_directory.py | 33 ++++++---- tests/module_api/test_api.py | 14 ++--- tests/push/test_email.py | 51 +++++++++++----- tests/push/test_http.py | 45 +++++++++----- tests/replication/tcp/streams/test_events.py | 10 +-- .../tcp/streams/test_partial_state.py | 2 +- tests/replication/tcp/streams/test_typing.py | 4 +- tests/replication/tcp/test_handler.py | 1 + .../test_federation_sender_shard.py | 2 + tests/replication/test_pusher_shard.py | 1 + tests/rest/admin/test_user.py | 5 +- tests/rest/admin/test_username_available.py | 15 +++-- tests/rest/client/test_account.py | 2 +- tests/rest/client/test_filter.py | 4 +- tests/rest/client/test_presence.py | 10 +-- tests/rest/client/test_register.py | 7 ++- tests/rest/client/test_retention.py | 6 +- tests/rest/client/test_rooms.py | 12 ++-- tests/rest/client/test_shadow_banned.py | 6 +- tests/rest/client/test_upgrade_room.py | 2 +- .../test_resource_limits_server_notices.py | 50 ++++++++------- .../databases/main/test_events_worker.py | 1 + tests/storage/test_event_chain.py | 10 ++- tests/storage/test_event_federation.py | 9 ++- tests/storage/test_events.py | 8 ++- tests/storage/test_keys.py | 61 ++++++++++--------- tests/storage/test_purge.py | 2 +- tests/storage/test_receipts.py | 6 +- tests/storage/test_room_search.py | 3 +- tests/storage/test_stream.py | 4 +- tests/storage/test_unsafe_locale.py | 2 + tests/test_federation.py | 31 ++++++---- tests/test_phone_home.py | 2 +- tests/test_visibility.py | 16 ++--- tests/unittest.py | 4 +- tests/util/test_retryutils.py | 2 + 55 files changed, 433 insertions(+), 320 deletions(-) create mode 100644 changelog.d/15072.misc diff --git a/changelog.d/15072.misc b/changelog.d/15072.misc new file mode 100644 index 
000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15072.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 57f27ba4f72f..ff6e04b12fe0 100644 --- a/mypy.ini +++ b/mypy.ini @@ -56,9 +56,6 @@ disallow_untyped_defs = False [mypy-synapse.storage.database] disallow_untyped_defs = False -[mypy-tests.unittest] -disallow_untyped_defs = False - [mypy-tests.util.caches.test_descriptors] disallow_untyped_defs = False diff --git a/tests/app/test_openid_listener.py b/tests/app/test_openid_listener.py index 5d89ba94ad8f..2ee343d8a4ad 100644 --- a/tests/app/test_openid_listener.py +++ b/tests/app/test_openid_listener.py @@ -67,7 +67,9 @@ def test_openid_listener(self, names: List[str], expectation: str) -> None: } # Listen with the config - self.hs._listen_http(parse_listener_def(0, config)) + hs = self.hs + assert isinstance(hs, GenericWorkerServer) + hs._listen_http(parse_listener_def(0, config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] @@ -115,7 +117,9 @@ def test_openid_listener(self, names: List[str], expectation: str) -> None: } # Listen with the config - self.hs._listener_http(self.hs.config, parse_listener_def(0, config)) + hs = self.hs + assert isinstance(hs, SynapseHomeServer) + hs._listener_http(self.hs.config, parse_listener_def(0, config)) # Grab the resource from the site that was told to listen site = self.reactor.tcpServers[0][1] diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 0e8af2da54b5..1b9696748fdc 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -192,7 +192,7 @@ def test_verify_json_for_server(self) -> None: key1 = signedjson.key.generate_signing_key("1") r = self.hs.get_datastores().main.store_server_verify_keys( "server9", - time.time() * 1000, + int(time.time() * 1000), [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))], ) self.get_success(r) @@ -287,7 +287,7 @@ def test_verify_json_for_server_with_null_valid_until_ms(self) -> None: key1 = signedjson.key.generate_signing_key("1") r = self.hs.get_datastores().main.store_server_verify_keys( "server9", - time.time() * 1000, + int(time.time() * 1000), # None is not a valid value in FetchKeyResult, but we're abusing this # API to insert null values into the database. The nulls get converted # to 0 when fetched in KeyStore.get_server_verify_keys. 
@@ -466,9 +466,9 @@ async def get_json(destination: str, path: str, **kwargs: Any) -> JsonDict: key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) ) - res = key_json[lookup_triplet] - self.assertEqual(len(res), 1) - res = res[0] + res_keys = key_json[lookup_triplet] + self.assertEqual(len(res_keys), 1) + res = res_keys[0] self.assertEqual(res["key_id"], testverifykey_id) self.assertEqual(res["from_server"], SERVER_NAME) self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) @@ -584,9 +584,9 @@ def test_get_keys_from_perspectives(self) -> None: key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) ) - res = key_json[lookup_triplet] - self.assertEqual(len(res), 1) - res = res[0] + res_keys = key_json[lookup_triplet] + self.assertEqual(len(res_keys), 1) + res = res_keys[0] self.assertEqual(res["key_id"], testverifykey_id) self.assertEqual(res["from_server"], self.mock_perspective_server.server_name) self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) @@ -705,9 +705,9 @@ def test_get_perspectives_own_key(self) -> None: key_json = self.get_success( self.hs.get_datastores().main.get_server_keys_json([lookup_triplet]) ) - res = key_json[lookup_triplet] - self.assertEqual(len(res), 1) - res = res[0] + res_keys = key_json[lookup_triplet] + self.assertEqual(len(res_keys), 1) + res = res_keys[0] self.assertEqual(res["key_id"], testverifykey_id) self.assertEqual(res["from_server"], self.mock_perspective_server.server_name) self.assertEqual(res["ts_added_ms"], self.reactor.seconds() * 1000) diff --git a/tests/events/test_presence_router.py b/tests/events/test_presence_router.py index 741bb6464a81..6fb1f1bd6e31 100644 --- a/tests/events/test_presence_router.py +++ b/tests/events/test_presence_router.py @@ -156,11 +156,11 @@ class PresenceRouterTestCase(FederatingHomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
- fed_transport_client = Mock(spec=["send_transaction"]) - fed_transport_client.send_transaction = simple_async_mock({}) + self.fed_transport_client = Mock(spec=["send_transaction"]) + self.fed_transport_client.send_transaction = simple_async_mock({}) hs = self.setup_test_homeserver( - federation_transport_client=fed_transport_client, + federation_transport_client=self.fed_transport_client, ) load_legacy_presence_router(hs) @@ -422,7 +422,7 @@ def send_local_online_presence_to_with_module_test_body(self) -> None: # # Thus we reset the mock, and try sending all online local user # presence again - self.hs.get_federation_transport_client().send_transaction.reset_mock() + self.fed_transport_client.send_transaction.reset_mock() # Broadcast local user online presence self.get_success( @@ -447,9 +447,7 @@ def send_local_online_presence_to_with_module_test_body(self) -> None: } found_users = set() - calls = ( - self.hs.get_federation_transport_client().send_transaction.call_args_list - ) + calls = self.fed_transport_client.send_transaction.call_args_list for call in calls: call_args = call[0] federation_transaction: Transaction = call_args[0] diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index d667dd27bf40..35dd9a20df2c 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -17,7 +17,7 @@ from synapse.api.errors import Codes, SynapseError from synapse.rest import admin from synapse.rest.client import login, room -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, UserID, create_requester from tests import unittest from tests.test_utils import make_awaitable @@ -56,7 +56,11 @@ def test_complexity_simple(self) -> None: # Artificially raise the complexity store = self.hs.get_datastores().main - store.get_current_state_event_counts = lambda x: make_awaitable(500 * 1.23) + + async def get_current_state_event_counts(room_id: str) -> int: + return int(500 * 1.23) + + store.get_current_state_event_counts = get_current_state_event_counts # type: ignore[assignment] # Get the room complexity again -- make sure it's our artificial value channel = self.make_signed_federation_request( @@ -75,12 +79,12 @@ def test_join_too_large(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), @@ -106,12 +110,12 @@ def test_join_too_large_admin(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), @@ -144,17 +148,18 @@ def test_join_too_large_once_joined(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable(None)) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] 
return_value=make_awaitable(("", 1)) ) # Artificially raise the complexity - self.hs.get_datastores().main.get_current_state_event_counts = ( - lambda x: make_awaitable(600) - ) + async def get_current_state_event_counts(room_id: str) -> int: + return 600 + + self.hs.get_datastores().main.get_current_state_event_counts = get_current_state_event_counts # type: ignore[assignment] d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], room_1, UserID.from_string(u1), @@ -200,12 +205,12 @@ def test_join_too_large_no_admin(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), @@ -230,12 +235,12 @@ def test_join_too_large_admin(self) -> None: # Mock out some things, because we don't want to test the whole join fed_transport.client.get_json = Mock(return_value=make_awaitable({"v1": 9999})) - handler.federation_handler.do_invite_join = Mock( + handler.federation_handler.do_invite_join = Mock( # type: ignore[assignment] return_value=make_awaitable(("", 1)) ) d = handler._remote_join( - None, + create_requester(u1), ["other.example.com"], "roomid", UserID.from_string(u1), diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index a986b15f0a87..6381583c24b4 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -5,7 +5,11 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.federation.sender import PerDestinationQueue, TransactionManager +from synapse.federation.sender import ( + FederationSender, + PerDestinationQueue, + TransactionManager, +) from synapse.federation.units import Edu, Transaction from synapse.rest import admin from synapse.rest.client import login, room @@ -33,8 +37,9 @@ class FederationCatchUpTestCases(FederatingHomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.federation_transport_client = Mock(spec=["send_transaction"]) return self.setup_test_homeserver( - federation_transport_client=Mock(spec=["send_transaction"]), + federation_transport_client=self.federation_transport_client, ) def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -52,10 +57,14 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.pdus: List[JsonDict] = [] self.failed_pdus: List[JsonDict] = [] self.is_online = True - self.hs.get_federation_transport_client().send_transaction.side_effect = ( + self.federation_transport_client.send_transaction.side_effect = ( self.record_transaction ) + federation_sender = hs.get_federation_sender() + assert isinstance(federation_sender, FederationSender) + self.federation_sender = federation_sender + def default_config(self) -> JsonDict: config = super().default_config() config["federation_sender_instances"] = None @@ -229,11 +238,11 @@ def test_catch_up_from_blank_state(self) -> None: # let's delete the federation transmission queue # (this pretends we are starting up fresh.) 
self.assertFalse( - self.hs.get_federation_sender() - ._per_destination_queues["host2"] - .transmission_loop_running + self.federation_sender._per_destination_queues[ + "host2" + ].transmission_loop_running ) - del self.hs.get_federation_sender()._per_destination_queues["host2"] + del self.federation_sender._per_destination_queues["host2"] # let's also clear any backoffs self.get_success( @@ -322,6 +331,7 @@ def test_catch_up_loop(self) -> None: # also fetch event 5 so we know its last_successful_stream_ordering later event_5 = self.get_success(self.hs.get_datastores().main.get_event(event_id_5)) + assert event_2.internal_metadata.stream_ordering is not None self.get_success( self.hs.get_datastores().main.set_destination_last_successful_stream_ordering( "host2", event_2.internal_metadata.stream_ordering @@ -425,15 +435,16 @@ def test_catch_up_on_synapse_startup(self) -> None: def wake_destination_track(destination: str) -> None: woken.append(destination) - self.hs.get_federation_sender().wake_destination = wake_destination_track + self.federation_sender.wake_destination = wake_destination_track # type: ignore[assignment] # cancel the pre-existing timer for _wake_destinations_needing_catchup # this is because we are calling it manually rather than waiting for it # to be called automatically - self.hs.get_federation_sender()._catchup_after_startup_timer.cancel() + assert self.federation_sender._catchup_after_startup_timer is not None + self.federation_sender._catchup_after_startup_timer.cancel() self.get_success( - self.hs.get_federation_sender()._wake_destinations_needing_catchup(), by=5.0 + self.federation_sender._wake_destinations_needing_catchup(), by=5.0 ) # ASSERT (_wake_destinations_needing_catchup): @@ -475,6 +486,7 @@ def test_not_latest_event(self) -> None: ) ) + assert event_1.internal_metadata.stream_ordering is not None self.get_success( self.hs.get_datastores().main.set_destination_last_successful_stream_ordering( "host2", event_1.internal_metadata.stream_ordering diff --git a/tests/federation/test_federation_client.py b/tests/federation/test_federation_client.py index 86e1236501fd..91694e4fcada 100644 --- a/tests/federation/test_federation_client.py +++ b/tests/federation/test_federation_client.py @@ -178,7 +178,7 @@ def test_get_pdu_event_from_cache_is_pristine(self) -> None: RoomVersions.V9, ) ) - self.assertIsNotNone(pulled_pdu_info2) + assert pulled_pdu_info2 is not None remote_pdu2 = pulled_pdu_info2.pdu # Sanity check that we are working against the same event @@ -226,7 +226,7 @@ def _get_pdu_once(self) -> EventBase: RoomVersions.V9, ) ) - self.assertIsNotNone(pulled_pdu_info) + assert pulled_pdu_info is not None remote_pdu = pulled_pdu_info.pdu # check the right call got made to the agent diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index ddeffe1ad53b..9e104fd96aeb 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -22,6 +22,7 @@ from synapse.api.constants import EduTypes, RoomEncryptionAlgorithms from synapse.federation.units import Transaction +from synapse.handlers.device import DeviceHandler from synapse.rest import admin from synapse.rest.client import login from synapse.server import HomeServer @@ -41,8 +42,9 @@ class FederationSenderReceiptsTestCases(HomeserverTestCase): """ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.federation_transport_client = Mock(spec=["send_transaction"]) hs = self.setup_test_homeserver( 
- federation_transport_client=Mock(spec=["send_transaction"]), + federation_transport_client=self.federation_transport_client, ) hs.get_storage_controllers().state.get_current_hosts_in_room = Mock( # type: ignore[assignment] @@ -61,9 +63,7 @@ def default_config(self) -> JsonDict: return config def test_send_receipts(self) -> None: - mock_send_transaction = ( - self.hs.get_federation_transport_client().send_transaction - ) + mock_send_transaction = self.federation_transport_client.send_transaction mock_send_transaction.return_value = make_awaitable({}) sender = self.hs.get_federation_sender() @@ -103,9 +103,7 @@ def test_send_receipts(self) -> None: ) def test_send_receipts_thread(self) -> None: - mock_send_transaction = ( - self.hs.get_federation_transport_client().send_transaction - ) + mock_send_transaction = self.federation_transport_client.send_transaction mock_send_transaction.return_value = make_awaitable({}) # Create receipts for: @@ -181,9 +179,7 @@ def test_send_receipts_thread(self) -> None: def test_send_receipts_with_backoff(self) -> None: """Send two receipts in quick succession; the second should be flushed, but only after 20ms""" - mock_send_transaction = ( - self.hs.get_federation_transport_client().send_transaction - ) + mock_send_transaction = self.federation_transport_client.send_transaction mock_send_transaction.return_value = make_awaitable({}) sender = self.hs.get_federation_sender() @@ -277,10 +273,11 @@ class FederationSenderDevicesTestCases(HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.federation_transport_client = Mock( + spec=["send_transaction", "query_user_devices"] + ) return self.setup_test_homeserver( - federation_transport_client=Mock( - spec=["send_transaction", "query_user_devices"] - ), + federation_transport_client=self.federation_transport_client, ) def default_config(self) -> JsonDict: @@ -310,9 +307,13 @@ async def get_current_hosts_in_room(room_id: str) -> Set[str]: hs.get_datastores().main.get_current_hosts_in_room = get_current_hosts_in_room # type: ignore[assignment] + device_handler = hs.get_device_handler() + assert isinstance(device_handler, DeviceHandler) + self.device_handler = device_handler + # whenever send_transaction is called, record the edu data self.edus: List[JsonDict] = [] - self.hs.get_federation_transport_client().send_transaction.side_effect = ( + self.federation_transport_client.send_transaction.side_effect = ( self.record_transaction ) @@ -353,7 +354,7 @@ def test_dont_send_device_updates_for_remote_users(self) -> None: # Send the server a device list EDU for the other user, this will cause # it to try and resync the device lists. 
- self.hs.get_federation_transport_client().query_user_devices.return_value = ( + self.federation_transport_client.query_user_devices.return_value = ( make_awaitable( { "stream_id": "1", @@ -364,7 +365,7 @@ def test_dont_send_device_updates_for_remote_users(self) -> None: ) self.get_success( - self.hs.get_device_handler().device_list_updater.incoming_device_list_update( + self.device_handler.device_list_updater.incoming_device_list_update( "host2", { "user_id": "@user2:host2", @@ -507,9 +508,7 @@ def test_delete_devices(self) -> None: stream_id = self.check_device_update_edu(self.edus.pop(0), u1, "D3", stream_id) # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. @@ -533,7 +532,7 @@ def test_unreachable_server(self) -> None: """If the destination server is unreachable, all the updates should get sent on recovery """ - mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn = self.federation_transport_client.send_transaction mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) # create devices @@ -543,9 +542,7 @@ def test_unreachable_server(self) -> None: self.login("user", "pass", device_id="D3") # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. @@ -580,7 +577,7 @@ def test_prune_outbound_device_pokes1(self) -> None: This case tests the behaviour when the server has never been reachable. """ - mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn = self.federation_transport_client.send_transaction mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) # create devices @@ -590,9 +587,7 @@ def test_prune_outbound_device_pokes1(self) -> None: self.login("user", "pass", device_id="D3") # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) # We queue up device list updates to be sent over federation, so we # advance to clear the queue. 
@@ -640,7 +635,7 @@ def test_prune_outbound_device_pokes2(self) -> None: self.check_device_update_edu(self.edus.pop(0), u1, "D1", None) # now the server goes offline - mock_send_txn = self.hs.get_federation_transport_client().send_transaction + mock_send_txn = self.federation_transport_client.send_transaction mock_send_txn.side_effect = lambda t, cb: defer.fail(AssertionError("fail")) self.login("user", "pass", device_id="D2") @@ -651,9 +646,7 @@ def test_prune_outbound_device_pokes2(self) -> None: self.reactor.advance(1) # delete them again - self.get_success( - self.hs.get_device_handler().delete_devices(u1, ["D1", "D2", "D3"]) - ) + self.get_success(self.device_handler.delete_devices(u1, ["D1", "D2", "D3"])) self.assertGreaterEqual(mock_send_txn.call_count, 3) diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index a7495ab21a41..9014e60577c7 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -899,7 +899,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # Mock out application services, and allow defining our own in tests self._services: List[ApplicationService] = [] - self.hs.get_datastores().main.get_app_services = Mock( + self.hs.get_datastores().main.get_app_services = Mock( # type: ignore[assignment] return_value=self._services ) diff --git a/tests/handlers/test_cas.py b/tests/handlers/test_cas.py index 2733719d8270..63aad0d10c2f 100644 --- a/tests/handlers/test_cas.py +++ b/tests/handlers/test_cas.py @@ -61,7 +61,7 @@ def test_map_cas_user_to_user(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] cas_response = CasResponse("test_user", {}) request = _mock_request() @@ -89,7 +89,7 @@ def test_map_cas_user_to_existing_user(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # Map a user via SSO. cas_response = CasResponse("test_user", {}) @@ -129,7 +129,7 @@ def test_map_cas_user_to_invalid_localpart(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] cas_response = CasResponse("föö", {}) request = _mock_request() @@ -160,7 +160,7 @@ def test_required_attributes(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # The response doesn't have the proper userGroup or department. 
cas_response = CasResponse("test_user", {}) diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 95698bc27585..6b4cba65d069 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -23,6 +23,7 @@ from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import Codes, SynapseError +from synapse.handlers.device import DeviceHandler from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -187,37 +188,37 @@ def test_fallback_key(self) -> None: ) # we should now have an unused alg1 key - res = self.get_success( + fallback_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, ["alg1"]) + self.assertEqual(fallback_res, ["alg1"]) # claiming an OTK when no OTKs are available should return the fallback # key - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, ) # we shouldn't have any unused fallback keys again - res = self.get_success( + unused_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, []) + self.assertEqual(unused_res, []) # claiming an OTK again should return the same fallback key - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key}}}, ) @@ -231,10 +232,10 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + unused_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, []) + self.assertEqual(unused_res, []) # uploading a new fallback key should result in an unused fallback key self.get_success( @@ -245,10 +246,10 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + unused_res = self.get_success( self.store.get_e2e_unused_fallback_key_types(local_user, device_id) ) - self.assertEqual(res, ["alg1"]) + self.assertEqual(unused_res, ["alg1"]) # if the user uploads a one-time key, the next claim should fetch the # one-time key, and then go back to the fallback @@ -258,23 +259,23 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: otk}}}, ) - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key2}}}, ) @@ -287,13 +288,13 @@ def test_fallback_key(self) -> None: ) ) - res = self.get_success( + claim_res = self.get_success( self.handler.claim_one_time_keys( {"one_time_keys": {local_user: {device_id: "alg1"}}}, timeout=None ) ) self.assertEqual( - res, + claim_res, {"failures": {}, "one_time_keys": {local_user: {device_id: fallback_key3}}}, ) @@ -366,7 +367,7 @@ def test_reupload_signatures(self) -> None: 
self.get_success(self.handler.upload_signing_keys_for_user(local_user, keys1)) # upload two device keys, which will be signed later by the self-signing key - device_key_1 = { + device_key_1: JsonDict = { "user_id": local_user, "device_id": "abc", "algorithms": [ @@ -379,7 +380,7 @@ def test_reupload_signatures(self) -> None: }, "signatures": {local_user: {"ed25519:abc": "base64+signature"}}, } - device_key_2 = { + device_key_2: JsonDict = { "user_id": local_user, "device_id": "def", "algorithms": [ @@ -451,8 +452,10 @@ def test_self_signing_key_doesnt_show_up_as_device(self) -> None: } self.get_success(self.handler.upload_signing_keys_for_user(local_user, keys1)) + device_handler = self.hs.get_device_handler() + assert isinstance(device_handler, DeviceHandler) e = self.get_failure( - self.hs.get_device_handler().check_device_registered( + device_handler.check_device_registered( user_id=local_user, device_id="nqOvzeuGWT/sRx3h7+MHoInYj3Uk2LD/unI9kDYcHwk", initial_device_display_name="new display name", @@ -475,7 +478,7 @@ def test_upload_signatures(self) -> None: device_id = "xyz" # private key: OMkooTr76ega06xNvXIGPbgvvxAOzmQncN8VObS7aBA device_pubkey = "NnHhnqiMFQkq969szYkooLaBAXW244ZOxgukCvm2ZeY" - device_key = { + device_key: JsonDict = { "user_id": local_user, "device_id": device_id, "algorithms": [ @@ -497,7 +500,7 @@ def test_upload_signatures(self) -> None: # private key: 2lonYOM6xYKdEsO+6KrC766xBcHnYnim1x/4LFGF8B0 master_pubkey = "nqOvzeuGWT/sRx3h7+MHoInYj3Uk2LD/unI9kDYcHwk" - master_key = { + master_key: JsonDict = { "user_id": local_user, "usage": ["master"], "keys": {"ed25519:" + master_pubkey: master_pubkey}, @@ -540,7 +543,7 @@ def test_upload_signatures(self) -> None: # the first user other_user = "@otherboris:" + self.hs.hostname other_master_pubkey = "fHZ3NPiKxoLQm5OoZbKa99SYxprOjNs4TwJUKP+twCM" - other_master_key = { + other_master_key: JsonDict = { # private key: oyw2ZUx0O4GifbfFYM0nQvj9CL0b8B7cyN4FprtK8OI "user_id": other_user, "usage": ["master"], @@ -702,7 +705,7 @@ def test_query_devices_remote_no_sync(self) -> None: remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_client_keys = mock.Mock( + self.hs.get_federation_client().query_client_keys = mock.Mock( # type: ignore[assignment] return_value=make_awaitable( { "device_keys": {remote_user_id: {}}, @@ -782,7 +785,7 @@ def test_query_devices_remote_sync(self) -> None: remote_master_key = "85T7JXPFBAySB/jwby4S3lBPTqY3+Zg53nYuGmu1ggY" remote_self_signing_key = "QeIiFEjluPBtI7WQdG365QKZcFs9kqmHir6RBD0//nQ" - self.hs.get_federation_client().query_user_devices = mock.Mock( + self.hs.get_federation_client().query_user_devices = mock.Mock( # type: ignore[assignment] return_value=make_awaitable( { "user_id": remote_user_id, diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 5868eb2da766..bf0862ed541c 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -371,14 +371,14 @@ def test_backfill_ignores_known_events(self) -> None: # We mock out the FederationClient.backfill method, to pretend that a remote # server has returned our fake event. 
federation_client_backfill_mock = Mock(return_value=make_awaitable([event])) - self.hs.get_federation_client().backfill = federation_client_backfill_mock + self.hs.get_federation_client().backfill = federation_client_backfill_mock # type: ignore[assignment] # We also mock the persist method with a side effect of itself. This allows us # to track when it has been called while preserving its function. persist_events_and_notify_mock = Mock( side_effect=self.hs.get_federation_event_handler().persist_events_and_notify ) - self.hs.get_federation_event_handler().persist_events_and_notify = ( + self.hs.get_federation_event_handler().persist_events_and_notify = ( # type: ignore[assignment] persist_events_and_notify_mock ) @@ -712,12 +712,12 @@ async def sync_partial_state_room( fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): # Start the partial state sync. - fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) # Try to start another partial state sync. # Nothing should happen. - fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs3", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) # End the partial state sync @@ -729,7 +729,7 @@ async def sync_partial_state_room( # The next attempt to start the partial state sync should work. is_partial_state = True - fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs3", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 2) def test_partial_state_room_sync_restart(self) -> None: @@ -764,7 +764,7 @@ async def sync_partial_state_room( fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): # Start the partial state sync. - fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) # Fail the partial state sync. @@ -773,11 +773,11 @@ async def sync_partial_state_room( self.assertEqual(mock_sync_partial_state_room.call_count, 1) # Start the partial state sync again. - fed_handler._start_partial_state_room_sync("hs1", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 2) # Deduplicate another partial state sync. - fed_handler._start_partial_state_room_sync("hs3", ["hs2"], "room_id") + fed_handler._start_partial_state_room_sync("hs3", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 2) # Fail the partial state sync. 
@@ -786,6 +786,6 @@ async def sync_partial_state_room( self.assertEqual(mock_sync_partial_state_room.call_count, 3) mock_sync_partial_state_room.assert_called_with( initial_destination="hs3", - other_destinations=["hs2"], + other_destinations={"hs2"}, room_id="room_id", ) diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index 70ea4d15d4d6..c067e5bfe3dc 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -29,6 +29,7 @@ from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer +from synapse.state import StateResolutionStore from synapse.state.v2 import _mainline_sort, _reverse_topological_power_sort from synapse.types import JsonDict from synapse.util import Clock @@ -161,6 +162,7 @@ def _test_process_pulled_event_with_missing_state( if prev_exists_as_outlier: prev_event.internal_metadata.outlier = True persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None self.get_success( persistence.persist_event( prev_event, @@ -861,7 +863,7 @@ def test_process_pulled_event_with_rejected_missing_state(self) -> None: bert_member_event.event_id: bert_member_event, rejected_kick_event.event_id: rejected_kick_event, }, - state_res_store=main_store, + state_res_store=StateResolutionStore(main_store), ) ), [bert_member_event.event_id, rejected_kick_event.event_id], @@ -906,7 +908,7 @@ def test_process_pulled_event_with_rejected_missing_state(self) -> None: rejected_power_levels_event.event_id, ], event_map={}, - state_res_store=main_store, + state_res_store=StateResolutionStore(main_store), full_conflicted_set=set(), ) ), diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index c4727ab917fd..69d384442f43 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -41,20 +41,21 @@ class EventCreationTestCase(unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = self.hs.get_event_creation_handler() - self._persist_event_storage_controller = ( - self.hs.get_storage_controllers().persistence - ) + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self._persist_event_storage_controller = persistence self.user_id = self.register_user("tester", "foobar") self.access_token = self.login("tester", "foobar") self.room_id = self.helper.create_room_as(self.user_id, tok=self.access_token) - self.info = self.get_success( + info = self.get_success( self.hs.get_datastores().main.get_user_by_access_token( self.access_token, ) ) - self.token_id = self.info.token_id + assert info is not None + self.token_id = info.token_id self.requester = create_requester(self.user_id, access_token_id=self.token_id) diff --git a/tests/handlers/test_password_providers.py b/tests/handlers/test_password_providers.py index 0916de64f548..aa91bc0a3df4 100644 --- a/tests/handlers/test_password_providers.py +++ b/tests/handlers/test_password_providers.py @@ -852,7 +852,7 @@ def _test_3pid_allowed(self, username: str, registration: bool) -> None: username: The username to use for the test. registration: Whether to test with registration URLs. 
""" - self.hs.get_identity_handler().send_threepid_validation = Mock( + self.hs.get_identity_handler().send_threepid_validation = Mock( # type: ignore[assignment] return_value=make_awaitable(0), ) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index b9332d97dcdc..782ef09cf4ab 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -203,7 +203,7 @@ def test_mau_limits_when_disabled(self) -> None: @override_config({"limit_usage_by_mau": True}) def test_get_or_create_user_mau_not_blocked(self) -> None: - self.store.count_monthly_users = Mock( + self.store.count_monthly_users = Mock( # type: ignore[assignment] return_value=make_awaitable(self.hs.config.server.max_mau_value - 1) ) # Ensure does not throw exception @@ -304,7 +304,7 @@ def test_auto_create_auto_join_rooms_when_user_is_not_a_real_user(self) -> None: def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self) -> None: room_alias_str = "#room:test" - self.store.count_real_users = Mock(return_value=make_awaitable(1)) + self.store.count_real_users = Mock(return_value=make_awaitable(1)) # type: ignore[assignment] self.store.is_real_user = Mock(return_value=make_awaitable(True)) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -319,7 +319,7 @@ def test_auto_create_auto_join_rooms_when_user_is_the_first_real_user(self) -> N def test_auto_create_auto_join_rooms_when_user_is_not_the_first_real_user( self, ) -> None: - self.store.count_real_users = Mock(return_value=make_awaitable(2)) + self.store.count_real_users = Mock(return_value=make_awaitable(2)) # type: ignore[assignment] self.store.is_real_user = Mock(return_value=make_awaitable(True)) user_id = self.get_success(self.handler.register_user(localpart="real")) rooms = self.get_success(self.store.get_rooms_for_user(user_id)) @@ -346,6 +346,7 @@ def test_auto_create_auto_join_rooms_federated(self) -> None: # Ensure the room is properly not federated. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertFalse(room["federatable"]) self.assertFalse(room["public"]) self.assertEqual(room["join_rules"], "public") @@ -375,6 +376,7 @@ def test_auto_join_mxid_localpart(self) -> None: # Ensure the room is properly a public room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertEqual(room["join_rules"], "public") # Both users should be in the room. @@ -413,6 +415,7 @@ def test_auto_create_auto_join_room_preset(self) -> None: # Ensure the room is properly a private room. room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertFalse(room["public"]) self.assertEqual(room["join_rules"], "invite") self.assertEqual(room["guest_access"], "can_join") @@ -456,6 +459,7 @@ def test_auto_create_auto_join_room_preset_guest(self) -> None: # Ensure the room is properly a private room. 
room = self.get_success(self.store.get_room_with_stats(room_id["room_id"])) + assert room is not None self.assertFalse(room["public"]) self.assertEqual(room["join_rules"], "invite") self.assertEqual(room["guest_access"], "can_join") diff --git a/tests/handlers/test_saml.py b/tests/handlers/test_saml.py index 9b1b8b9f1301..b5c772a7aedf 100644 --- a/tests/handlers/test_saml.py +++ b/tests/handlers/test_saml.py @@ -134,7 +134,7 @@ def test_map_saml_response_to_user(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # send a mocked-up SAML response to the callback saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) @@ -164,7 +164,7 @@ def test_map_saml_response_to_existing_user(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # Map a user via SSO. saml_response = FakeAuthnResponse( @@ -206,11 +206,11 @@ def test_map_saml_response_to_invalid_localpart(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # mock out the error renderer too sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) + sso_handler.render_error = Mock(return_value=None) # type: ignore[assignment] saml_response = FakeAuthnResponse({"uid": "test", "username": "föö"}) request = _mock_request() @@ -227,9 +227,9 @@ def test_map_saml_response_to_user_retries(self) -> None: # stub out the auth handler and error renderer auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] sso_handler = self.hs.get_sso_handler() - sso_handler.render_error = Mock(return_value=None) + sso_handler.render_error = Mock(return_value=None) # type: ignore[assignment] # register a user to occupy the first-choice MXID store = self.hs.get_datastores().main @@ -312,7 +312,7 @@ def test_attribute_requirements(self) -> None: # stub out the auth handler auth_handler = self.hs.get_auth_handler() - auth_handler.complete_sso_login = simple_async_mock() + auth_handler.complete_sso_login = simple_async_mock() # type: ignore[assignment] # The response doesn't have the proper userGroup or department. 
saml_response = FakeAuthnResponse({"uid": "test_user", "username": "test_user"}) diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 1fe9563c98f3..94518a7196b4 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -74,8 +74,8 @@ def make_homeserver( mock_keyring.verify_json_for_server.return_value = make_awaitable(True) # we mock out the federation client too - mock_federation_client = Mock(spec=["put_json"]) - mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) + self.mock_federation_client = Mock(spec=["put_json"]) + self.mock_federation_client.put_json.return_value = make_awaitable((200, "OK")) # the tests assume that we are starting at unix time 1000 reactor.pump((1000,)) @@ -83,7 +83,7 @@ def make_homeserver( self.mock_hs_notifier = Mock() hs = self.setup_test_homeserver( notifier=self.mock_hs_notifier, - federation_http_client=mock_federation_client, + federation_http_client=self.mock_federation_client, keyring=mock_keyring, replication_streams={}, ) @@ -233,8 +233,7 @@ def test_started_typing_remote_send(self) -> None: ) ) - put_json = self.hs.get_federation_http_client().put_json - put_json.assert_called_once_with( + self.mock_federation_client.put_json.assert_called_once_with( "farm", path="/_matrix/federation/v1/send/1000000", data=_expect_edu_transaction( @@ -349,8 +348,7 @@ def test_stopped_typing(self) -> None: self.on_new_event.assert_has_calls([call("typing_key", 1, rooms=[ROOM_ID])]) - put_json = self.hs.get_federation_http_client().put_json - put_json.assert_called_once_with( + self.mock_federation_client.put_json.assert_called_once_with( "farm", path="/_matrix/federation/v1/send/1000000", data=_expect_edu_transaction( diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index e9be5fb504f8..f65a68b9c2df 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Tuple +from typing import Any, Tuple from unittest.mock import Mock, patch from urllib.parse import quote @@ -24,7 +24,7 @@ from synapse.rest.client import login, register, room, user_directory from synapse.server import HomeServer from synapse.storage.roommember import ProfileInfo -from synapse.types import create_requester +from synapse.types import UserProfile, create_requester from synapse.util import Clock from tests import unittest @@ -34,6 +34,12 @@ from tests.unittest import override_config +# A spam checker which doesn't implement anything, so create a bare object. +class UselessSpamChecker: + def __init__(self, config: Any): + pass + + class UserDirectoryTestCase(unittest.HomeserverTestCase): """Tests the UserDirectoryHandler. @@ -773,7 +779,7 @@ def test_spam_checker(self) -> None: s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) - async def allow_all(user_profile: ProfileInfo) -> bool: + async def allow_all(user_profile: UserProfile) -> bool: # Allow all users. return False @@ -787,7 +793,7 @@ async def allow_all(user_profile: ProfileInfo) -> bool: self.assertEqual(len(s["results"]), 1) # Configure a spam checker that filters all users. - async def block_all(user_profile: ProfileInfo) -> bool: + async def block_all(user_profile: UserProfile) -> bool: # All users are spammy. 
return True @@ -797,6 +803,13 @@ async def block_all(user_profile: ProfileInfo) -> bool: s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 0) + @override_config( + { + "spam_checker": { + "module": "tests.handlers.test_user_directory.UselessSpamChecker" + } + } + ) def test_legacy_spam_checker(self) -> None: """ A spam checker without the expected method should be ignored. @@ -825,11 +838,6 @@ def test_legacy_spam_checker(self) -> None: self.assertEqual(shares_private, {(u1, u2, room), (u2, u1, room)}) self.assertEqual(public_users, set()) - # Configure a spam checker. - spam_checker = self.hs.get_spam_checker() - # The spam checker doesn't need any methods, so create a bare object. - spam_checker.spam_checker = object() - # We get one search result when searching for user2 by user1. s = self.get_success(self.handler.search_users(u1, "user2", 10)) self.assertEqual(len(s["results"]), 1) @@ -954,10 +962,9 @@ def _add_user_to_room( ) context = self.get_success(unpersisted_context.persist(event)) - - self.get_success( - self.hs.get_storage_controllers().persistence.persist_event(event, context) - ) + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self.get_success(persistence.persist_event(event, context)) def test_local_user_leaving_room_remains_in_user_directory(self) -> None: """We've chosen to simplify the user directory's implementation by diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index cc173ebda62f..3a1929691e9c 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -68,11 +68,11 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: # Mock out the calls over federation. 
- fed_transport_client = Mock(spec=["send_transaction"]) - fed_transport_client.send_transaction = simple_async_mock({}) + self.fed_transport_client = Mock(spec=["send_transaction"]) + self.fed_transport_client.send_transaction = simple_async_mock({}) return self.setup_test_homeserver( - federation_transport_client=fed_transport_client, + federation_transport_client=self.fed_transport_client, ) def test_can_register_user(self) -> None: @@ -417,7 +417,7 @@ def test_send_local_online_presence_to_federation(self) -> None: # # Thus we reset the mock, and try sending online local user # presence again - self.hs.get_federation_transport_client().send_transaction.reset_mock() + self.fed_transport_client.send_transaction.reset_mock() # Broadcast local user online presence self.get_success( @@ -429,9 +429,7 @@ def test_send_local_online_presence_to_federation(self) -> None: # Check that a presence update was sent as part of a federation transaction found_update = False - calls = ( - self.hs.get_federation_transport_client().send_transaction.call_args_list - ) + calls = self.fed_transport_client.send_transaction.call_args_list for call in calls: call_args = call[0] federation_transaction: Transaction = call_args[0] @@ -581,7 +579,7 @@ def test_update_room_membership_remote_join(self) -> None: mocked_remote_join = simple_async_mock( return_value=("fake-event-id", fake_stream_id) ) - self.hs.get_room_member_handler()._remote_join = mocked_remote_join + self.hs.get_room_member_handler()._remote_join = mocked_remote_join # type: ignore[assignment] fake_remote_host = f"{self.module_api.server_name}-remote" # Given that the join is to be faked, we expect the relevant join event not to diff --git a/tests/push/test_email.py b/tests/push/test_email.py index ab8bb417e759..7563f33fdc78 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -23,6 +23,7 @@ import synapse.rest.admin from synapse.api.errors import Codes, SynapseError +from synapse.push.emailpusher import EmailPusher from synapse.rest.client import login, room from synapse.server import HomeServer from synapse.util import Clock @@ -105,6 +106,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(self.access_token) ) + assert user_tuple is not None self.token_id = user_tuple.token_id # We need to add email to account before we can create a pusher. 
@@ -114,7 +116,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: ) ) - self.pusher = self.get_success( + pusher = self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, access_token=self.token_id, @@ -127,6 +129,8 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: data={}, ) ) + assert isinstance(pusher, EmailPusher) + self.pusher = pusher self.auth_handler = hs.get_auth_handler() self.store = hs.get_datastores().main @@ -375,10 +379,13 @@ def test_no_email_sent_after_removed(self) -> None: ) # check that the pusher for that email address has been deleted - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 0) def test_remove_unlinked_pushers_background_job(self) -> None: @@ -413,10 +420,13 @@ def test_remove_unlinked_pushers_background_job(self) -> None: self.wait_for_background_updates() # Check that all pushers with unlinked addresses were deleted - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 0) def _check_for_mail(self) -> Tuple[Sequence, Dict]: @@ -428,10 +438,13 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]: that notification. """ # Get the stream ordering before it gets sent - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) last_stream_ordering = pushers[0].last_stream_ordering @@ -439,10 +452,13 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]: self.pump(10) # It hasn't succeeded yet, so the stream ordering shouldn't have moved - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertEqual(last_stream_ordering, pushers[0].last_stream_ordering) @@ -458,10 +474,13 @@ def _check_for_mail(self) -> Tuple[Sequence, Dict]: self.assertEqual(len(self.email_attempts), 1) # The stream ordering has increased - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": self.user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by( + {"user_name": self.user_id} + ) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) diff --git a/tests/push/test_http.py b/tests/push/test_http.py index 23447cc31008..c280ddcdf6a9 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import List, Optional, Tuple +from typing import Any, List, Tuple from unittest.mock import Mock from twisted.internet.defer import Deferred @@ -22,7 +22,6 @@ from synapse.push import PusherConfig, PusherConfigException from synapse.rest.client import login, push_rule, pusher, receipts, room from synapse.server import HomeServer -from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import JsonDict from synapse.util import Clock @@ -67,9 +66,10 @@ def test_invalid_configuration(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id - def test_data(data: Optional[JsonDict]) -> None: + def test_data(data: Any) -> None: self.get_failure( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, @@ -113,6 +113,7 @@ def test_sends_http(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -140,10 +141,11 @@ def test_sends_http(self) -> None: self.helper.send(room, body="There!", tok=other_access_token) # Get the stream ordering before it gets sent - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) last_stream_ordering = pushers[0].last_stream_ordering @@ -151,10 +153,11 @@ def test_sends_http(self) -> None: self.pump() # It hasn't succeeded yet, so the stream ordering shouldn't have moved - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertEqual(last_stream_ordering, pushers[0].last_stream_ordering) @@ -172,10 +175,11 @@ def test_sends_http(self) -> None: self.pump() # The stream ordering has increased - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) last_stream_ordering = pushers[0].last_stream_ordering @@ -194,10 +198,11 @@ def test_sends_http(self) -> None: self.pump() # The stream ordering has increased, again - pushers = self.get_success( - self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + pushers = list( + self.get_success( + self.hs.get_datastores().main.get_pushers_by({"user_name": user_id}) + ) ) - pushers = list(pushers) self.assertEqual(len(pushers), 1) self.assertTrue(pushers[0].last_stream_ordering > last_stream_ordering) @@ -229,6 +234,7 @@ def test_sends_high_priority_for_encrypted(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -349,6 +355,7 @@ def test_sends_high_priority_for_one_to_one_only(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None 
token_id = user_tuple.token_id self.get_success( @@ -435,6 +442,7 @@ def test_sends_high_priority_for_mention(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -512,6 +520,7 @@ def test_sends_high_priority_for_atroom(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -618,6 +627,7 @@ def _test_push_unread_count(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -753,6 +763,7 @@ def _set_pusher(self, user_id: str, access_token: str, enabled: bool) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id self.get_success( @@ -895,6 +906,7 @@ def test_update_different_device_access_token_device_id(self) -> None: user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_tuple is not None token_id = user_tuple.token_id device_id = user_tuple.device_id @@ -941,9 +953,10 @@ def test_device_id(self) -> None: ) # Look up the user info for the access token so we can compare the device ID. - lookup_result: TokenLookupResult = self.get_success( + lookup_result = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert lookup_result is not None # Get the user's devices and check it has the correct device ID. channel = self.make_request("GET", "/pushers", access_token=access_token) diff --git a/tests/replication/tcp/streams/test_events.py b/tests/replication/tcp/streams/test_events.py index 043dbe76afb0..65ef4bb16055 100644 --- a/tests/replication/tcp/streams/test_events.py +++ b/tests/replication/tcp/streams/test_events.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, List, Optional +from typing import Any, List, Optional, Sequence from twisted.test.proto_helpers import MemoryReactor @@ -139,7 +139,7 @@ def test_update_function_huge_state_change(self) -> None: ) # this is the point in the DAG where we make a fork - fork_point: List[str] = self.get_success( + fork_point: Sequence[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -168,7 +168,7 @@ def test_update_function_huge_state_change(self) -> None: pl_event = self.get_success( inject_event( self.hs, - prev_event_ids=prev_events, + prev_event_ids=list(prev_events), type=EventTypes.PowerLevels, state_key="", sender=self.user_id, @@ -294,7 +294,7 @@ def test_update_function_state_row_limit(self) -> None: ) # this is the point in the DAG where we make a fork - fork_point: List[str] = self.get_success( + fork_point: Sequence[str] = self.get_success( self.hs.get_datastores().main.get_latest_event_ids_in_room(self.room_id) ) @@ -323,7 +323,7 @@ def test_update_function_state_row_limit(self) -> None: e = self.get_success( inject_event( self.hs, - prev_event_ids=prev_events, + prev_event_ids=list(prev_events), type=EventTypes.PowerLevels, state_key="", sender=self.user_id, diff --git a/tests/replication/tcp/streams/test_partial_state.py b/tests/replication/tcp/streams/test_partial_state.py index 38b5020ce0e1..452ac8506996 100644 --- a/tests/replication/tcp/streams/test_partial_state.py +++ b/tests/replication/tcp/streams/test_partial_state.py @@ -37,7 +37,7 @@ def test_un_partial_stated_room_unblocks_over_replication(self) -> None: room_id = self.helper.create_room_as("@bob:test") # Mark the room as partial-stated. self.get_success( - self.store.store_partial_state_room(room_id, ["serv1", "serv2"], 0, "serv1") + self.store.store_partial_state_room(room_id, {"serv1", "serv2"}, 0, "serv1") ) worker = self.make_worker_hs("synapse.app.generic_worker") diff --git a/tests/replication/tcp/streams/test_typing.py b/tests/replication/tcp/streams/test_typing.py index 68de5d1cc280..5a38ac831f26 100644 --- a/tests/replication/tcp/streams/test_typing.py +++ b/tests/replication/tcp/streams/test_typing.py @@ -13,7 +13,7 @@ # limitations under the License. from unittest.mock import Mock -from synapse.handlers.typing import RoomMember +from synapse.handlers.typing import RoomMember, TypingWriterHandler from synapse.replication.tcp.streams import TypingStream from synapse.util.caches.stream_change_cache import StreamChangeCache @@ -33,6 +33,7 @@ def _build_replication_data_handler(self) -> Mock: def test_typing(self) -> None: typing = self.hs.get_typing_handler() + assert isinstance(typing, TypingWriterHandler) self.reconnect() @@ -88,6 +89,7 @@ def test_reset(self) -> None: sends the proper position and RDATA). """ typing = self.hs.get_typing_handler() + assert isinstance(typing, TypingWriterHandler) self.reconnect() diff --git a/tests/replication/tcp/test_handler.py b/tests/replication/tcp/test_handler.py index 6e4055cc2102..bf927beb6a87 100644 --- a/tests/replication/tcp/test_handler.py +++ b/tests/replication/tcp/test_handler.py @@ -127,6 +127,7 @@ def test_wait_for_stream_position(self) -> None: # ... updating the cache ID gen on the master still shouldn't cause the # deferred to wake up. 
+ assert store._cache_id_gen is not None ctx = store._cache_id_gen.get_next() self.get_success(ctx.__aenter__()) self.get_success(ctx.__aexit__(None, None, None)) diff --git a/tests/replication/test_federation_sender_shard.py b/tests/replication/test_federation_sender_shard.py index 89380e25b59a..08703206a9c9 100644 --- a/tests/replication/test_federation_sender_shard.py +++ b/tests/replication/test_federation_sender_shard.py @@ -16,6 +16,7 @@ from synapse.api.constants import EventTypes, Membership from synapse.events.builder import EventBuilderFactory +from synapse.handlers.typing import TypingWriterHandler from synapse.rest.admin import register_servlets_for_client_rest_resource from synapse.rest.client import login, room from synapse.types import UserID, create_requester @@ -174,6 +175,7 @@ def test_send_typing_sharded(self) -> None: token = self.login("user3", "pass") typing_handler = self.hs.get_typing_handler() + assert isinstance(typing_handler, TypingWriterHandler) sent_on_1 = False sent_on_2 = False diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py index 9345cfbeb266..0798b021c3d0 100644 --- a/tests/replication/test_pusher_shard.py +++ b/tests/replication/test_pusher_shard.py @@ -50,6 +50,7 @@ def _create_pusher_and_send_msg(self, localpart: str) -> str: user_dict = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) + assert user_dict is not None token_id = user_dict.token_id self.get_success( diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index b50406e12929..f5b213219fa8 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -2913,7 +2913,8 @@ def test_get_rooms_with_nonlocal_user(self) -> None: other_user_tok = self.login("user", "pass") event_builder_factory = self.hs.get_event_builder_factory() event_creation_handler = self.hs.get_event_creation_handler() - storage_controllers = self.hs.get_storage_controllers() + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None # Create two rooms, one with a local user only and one with both a local # and remote user. @@ -2940,7 +2941,7 @@ def test_get_rooms_with_nonlocal_user(self) -> None: context = self.get_success(unpersisted_context.persist(event)) - self.get_success(storage_controllers.persistence.persist_event(event, context)) + self.get_success(persistence.persist_event(event, context)) # Now get rooms url = "/_synapse/admin/v1/users/@joiner:remote_hs/joined_rooms" diff --git a/tests/rest/admin/test_username_available.py b/tests/rest/admin/test_username_available.py index 30f12f1bffc7..6c04e6c56cc2 100644 --- a/tests/rest/admin/test_username_available.py +++ b/tests/rest/admin/test_username_available.py @@ -11,6 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from typing import Optional + from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -33,9 +35,14 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.register_user("admin", "pass", admin=True) self.admin_user_tok = self.login("admin", "pass") - async def check_username(username: str) -> bool: - if username == "allowed": - return True + async def check_username( + localpart: str, + guest_access_token: Optional[str] = None, + assigned_user_id: Optional[str] = None, + inhibit_user_in_use_error: bool = False, + ) -> None: + if localpart == "allowed": + return raise SynapseError( 400, "User ID already taken.", @@ -43,7 +50,7 @@ async def check_username(username: str) -> bool: ) handler = self.hs.get_registration_handler() - handler.check_username = check_username + handler.check_username = check_username # type: ignore[assignment] def test_username_available(self) -> None: """ diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 88f255c9eea9..e2ee1a1766cd 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -1193,7 +1193,7 @@ async def post_json( return {} # Register a mock that will return the expected result depending on the remote. - self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) + self.hs.get_federation_http_client().post_json = Mock(side_effect=post_json) # type: ignore[assignment] # Check that we've got the correct response from the client-side endpoint. self._test_status( diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index afc8d641bef2..830762fd5358 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -63,14 +63,14 @@ def test_add_filter_for_other_user(self) -> None: def test_add_filter_non_local_user(self) -> None: _is_mine = self.hs.is_mine - self.hs.is_mine = lambda target_user: False + self.hs.is_mine = lambda target_user: False # type: ignore[assignment] channel = self.make_request( "POST", "/_matrix/client/r0/user/%s/filter" % (self.user_id), self.EXAMPLE_FILTER_JSON, ) - self.hs.is_mine = _is_mine + self.hs.is_mine = _is_mine # type: ignore[assignment] self.assertEqual(channel.code, 403) self.assertEqual(channel.json_body["errcode"], Codes.FORBIDDEN) diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index b3738a03046a..67e16880e60b 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -36,14 +36,14 @@ class PresenceTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - presence_handler = Mock(spec=PresenceHandler) - presence_handler.set_state.return_value = make_awaitable(None) + self.presence_handler = Mock(spec=PresenceHandler) + self.presence_handler.set_state.return_value = make_awaitable(None) hs = self.setup_test_homeserver( "red", federation_http_client=None, federation_client=Mock(), - presence_handler=presence_handler, + presence_handler=self.presence_handler, ) return hs @@ -61,7 +61,7 @@ def test_put_presence(self) -> None: ) self.assertEqual(channel.code, HTTPStatus.OK) - self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 1) + self.assertEqual(self.presence_handler.set_state.call_count, 1) @unittest.override_config({"use_presence": False}) def test_put_presence_disabled(self) -> None: @@ -76,4 +76,4 @@ def test_put_presence_disabled(self) -> None: ) self.assertEqual(channel.code, 
HTTPStatus.OK) - self.assertEqual(self.hs.get_presence_handler().set_state.call_count, 0) + self.assertEqual(self.presence_handler.set_state.call_count, 0) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 11cf3939d84b..4c561f9525b9 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -151,7 +151,7 @@ def test_POST_disabled_registration(self) -> None: self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") def test_POST_guest_registration(self) -> None: - self.hs.config.key.macaroon_secret_key = "test" + self.hs.config.key.macaroon_secret_key = b"test" self.hs.config.registration.allow_guest_access = True channel = self.make_request(b"POST", self.url + b"?kind=guest", b"{}") @@ -1166,12 +1166,15 @@ def test_background_job(self) -> None: """ user_id = self.register_user("kermit_delta", "user") - self.hs.config.account_validity.startup_job_max_delta = self.max_delta + self.hs.config.account_validity.account_validity_startup_job_max_delta = ( + self.max_delta + ) now_ms = self.hs.get_clock().time_msec() self.get_success(self.store._set_expiration_date_when_missing()) res = self.get_success(self.store.get_expiration_ts_for_user(user_id)) + assert res is not None self.assertGreaterEqual(res, now_ms + self.validity_period - self.max_delta) self.assertLessEqual(res, now_ms + self.validity_period) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 9c8c1889d3ea..d3e06bf6b3d1 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -136,6 +136,7 @@ def test_visibility(self) -> None: # Send a first event, which should be filtered out at the end of the test. resp = self.helper.send(room_id=room_id, body="1", tok=self.token) first_event_id = resp.get("event_id") + assert isinstance(first_event_id, str) # Advance the time by 2 days. We're using the default retention policy, therefore # after this the first event will still be valid. @@ -144,6 +145,7 @@ def test_visibility(self) -> None: # Send another event, which shouldn't get filtered out. resp = self.helper.send(room_id=room_id, body="2", tok=self.token) valid_event_id = resp.get("event_id") + assert isinstance(valid_event_id, str) # Advance the time by another 2 days. After this, the first event should be # outdated but not the second one. @@ -229,7 +231,7 @@ def _test_retention_event_purged(self, room_id: str, increment: float) -> None: # Check that we can still access state events that were sent before the event that # has been purged. - self.get_event(room_id, create_event.event_id) + self.get_event(room_id, bool(create_event)) def get_event(self, event_id: str, expect_none: bool = False) -> JsonDict: event = self.get_success(self.store.get_event(event_id, allow_none=True)) @@ -238,7 +240,7 @@ def get_event(self, event_id: str, expect_none: bool = False) -> JsonDict: self.assertIsNone(event) return {} - self.assertIsNotNone(event) + assert event is not None time_now = self.clock.time_msec() serialized = self.serializer.serialize_event(event, time_now) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 9222cab19801..cfad182b2f1a 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -3382,8 +3382,8 @@ def test_threepid_invite_spamcheck_deprecated(self) -> None: # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. 
make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock - self.hs.get_identity_handler().lookup_3pid = Mock( + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] + self.hs.get_identity_handler().lookup_3pid = Mock( # type: ignore[assignment] return_value=make_awaitable(None), ) @@ -3443,8 +3443,8 @@ def test_threepid_invite_spamcheck(self) -> None: # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. make_invite_mock = Mock(return_value=make_awaitable((Mock(event_id="abc"), 0))) - self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock - self.hs.get_identity_handler().lookup_3pid = Mock( + self.hs.get_room_member_handler()._make_and_store_3pid_invite = make_invite_mock # type: ignore[assignment] + self.hs.get_identity_handler().lookup_3pid = Mock( # type: ignore[assignment] return_value=make_awaitable(None), ) @@ -3563,8 +3563,10 @@ def _inject_outlier(self, room_id: str) -> EventBase: ) event.internal_metadata.outlier = True + persistence = self._storage_controllers.persistence + assert persistence is not None self.get_success( - self._storage_controllers.persistence.persist_event( + persistence.persist_event( event, EventContext.for_outlier(self._storage_controllers) ) ) diff --git a/tests/rest/client/test_shadow_banned.py b/tests/rest/client/test_shadow_banned.py index c807a37bc2ea..8d2cdf875150 100644 --- a/tests/rest/client/test_shadow_banned.py +++ b/tests/rest/client/test_shadow_banned.py @@ -84,7 +84,7 @@ def test_invite(self) -> None: def test_invite_3pid(self) -> None: """Ensure that a 3PID invite does not attempt to contact the identity server.""" identity_handler = self.hs.get_identity_handler() - identity_handler.lookup_3pid = Mock( + identity_handler.lookup_3pid = Mock( # type: ignore[assignment] side_effect=AssertionError("This should not get called") ) @@ -222,7 +222,7 @@ def test_typing(self) -> None: event_source.get_new_events( user=UserID.from_string(self.other_user_id), from_key=0, - limit=None, + limit=10, room_ids=[room_id], is_guest=False, ) @@ -286,6 +286,7 @@ def test_displayname(self) -> None: self.banned_user_id, ) ) + assert event is not None self.assertEqual( event.content, {"membership": "join", "displayname": original_display_name} ) @@ -321,6 +322,7 @@ def test_room_displayname(self) -> None: self.banned_user_id, ) ) + assert event is not None self.assertEqual( event.content, {"membership": "join", "displayname": original_display_name} ) diff --git a/tests/rest/client/test_upgrade_room.py b/tests/rest/client/test_upgrade_room.py index 5ec343dd7fd2..0b4c691318e9 100644 --- a/tests/rest/client/test_upgrade_room.py +++ b/tests/rest/client/test_upgrade_room.py @@ -84,7 +84,7 @@ def test_upgrade(self) -> None: self.room_id, EventTypes.Tombstone, "" ) ) - self.assertIsNotNone(tombstone_event) + assert tombstone_event is not None self.assertEqual(new_room_id, tombstone_event.content["replacement_room"]) # Check that the new room exists. 
diff --git a/tests/server_notices/test_resource_limits_server_notices.py b/tests/server_notices/test_resource_limits_server_notices.py index 5b76383d760a..d2bfa53eda49 100644 --- a/tests/server_notices/test_resource_limits_server_notices.py +++ b/tests/server_notices/test_resource_limits_server_notices.py @@ -24,6 +24,7 @@ from synapse.server_notices.resource_limits_server_notices import ( ResourceLimitsServerNotices, ) +from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.types import JsonDict from synapse.util import Clock @@ -58,14 +59,15 @@ def default_config(self) -> JsonDict: return config def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.server_notices_sender = self.hs.get_server_notices_sender() + server_notices_sender = self.hs.get_server_notices_sender() + assert isinstance(server_notices_sender, ServerNoticesSender) # relying on [1] is far from ideal, but the only case where # ResourceLimitsServerNotices class needs to be isolated is this test, # general code should never have a reason to do so ... - self._rlsn = self.server_notices_sender._server_notices[1] - if not isinstance(self._rlsn, ResourceLimitsServerNotices): - raise Exception("Failed to find reference to ResourceLimitsServerNotices") + rlsn = list(server_notices_sender._server_notices)[1] + assert isinstance(rlsn, ResourceLimitsServerNotices) + self._rlsn = rlsn self._rlsn._store.user_last_seen_monthly_active = Mock( return_value=make_awaitable(1000) @@ -101,25 +103,29 @@ def test_maybe_send_server_notice_to_user_flag_off(self) -> None: def test_maybe_send_server_notice_to_user_remove_blocked_notice(self) -> None: """Test when user has blocked notice, but should have it removed""" - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None) ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( + self._rlsn._store.get_events = Mock( # type: ignore[assignment] return_value=make_awaitable({"123": mock_event}) ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) # Would be better to check the content, but once == remove blocking event - self._rlsn._server_notices_manager.maybe_get_notice_room_for_user.assert_called_once() + maybe_get_notice_room_for_user = ( + self._rlsn._server_notices_manager.maybe_get_notice_room_for_user + ) + assert isinstance(maybe_get_notice_room_for_user, Mock) + maybe_get_notice_room_for_user.assert_called_once() self._send_notice.assert_called_once() def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self) -> None: """ Test when user has blocked notice, but notice ought to be there (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -127,7 +133,7 @@ def test_maybe_send_server_notice_to_user_remove_blocked_notice_noop(self) -> No mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( + self._rlsn._store.get_events = Mock( # type: ignore[assignment] return_value=make_awaitable({"123": mock_event}) ) @@ -139,7 +145,7 @@ def test_maybe_send_server_notice_to_user_add_blocked_notice(self) -> None: """ Test when user does not have blocked notice, but 
should have one """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError(403, "foo"), ) @@ -152,7 +158,7 @@ def test_maybe_send_server_notice_to_user_add_blocked_notice_noop(self) -> None: """ Test when user does not have blocked notice, nor should they (NOOP) """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None) ) @@ -165,7 +171,7 @@ def test_maybe_send_server_notice_to_user_not_in_mau_cohort(self) -> None: Test when user is not part of the MAU cohort - this should not ever happen - but ... """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None) ) self._rlsn._store.user_last_seen_monthly_active = Mock( @@ -183,7 +189,7 @@ def test_maybe_send_server_notice_when_alerting_suppressed_room_unblocked( Test that when server is over MAU limit and alerting is suppressed, then an alert message is not sent into the room """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER @@ -198,7 +204,7 @@ def test_check_hs_disabled_unaffected_by_mau_alert_suppression(self) -> None: """ Test that when a server is disabled, that MAU limit alerting is ignored. """ - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.HS_DISABLED @@ -217,21 +223,21 @@ def test_maybe_send_server_notice_when_alerting_suppressed_room_blocked( When the room is already in a blocked state, test that when alerting is suppressed that the room is returned to an unblocked state. 
""" - self._rlsn._auth_blocking.check_auth_blocking = Mock( + self._rlsn._auth_blocking.check_auth_blocking = Mock( # type: ignore[assignment] return_value=make_awaitable(None), side_effect=ResourceLimitError( 403, "foo", limit_type=LimitBlockingTypes.MONTHLY_ACTIVE_USER ), ) - self._rlsn._server_notices_manager.__is_room_currently_blocked = Mock( + self._rlsn._is_room_currently_blocked = Mock( # type: ignore[assignment] return_value=make_awaitable((True, [])) ) mock_event = Mock( type=EventTypes.Message, content={"msgtype": ServerNoticeMsgType} ) - self._rlsn._store.get_events = Mock( + self._rlsn._store.get_events = Mock( # type: ignore[assignment] return_value=make_awaitable({"123": mock_event}) ) self.get_success(self._rlsn.maybe_send_server_notice_to_user(self.user_id)) @@ -262,16 +268,18 @@ def default_config(self) -> JsonDict: def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = self.hs.get_datastores().main - self.server_notices_sender = self.hs.get_server_notices_sender() self.server_notices_manager = self.hs.get_server_notices_manager() self.event_source = self.hs.get_event_sources() + server_notices_sender = self.hs.get_server_notices_sender() + assert isinstance(server_notices_sender, ServerNoticesSender) + # relying on [1] is far from ideal, but the only case where # ResourceLimitsServerNotices class needs to be isolated is this test, # general code should never have a reason to do so ... - self._rlsn = self.server_notices_sender._server_notices[1] - if not isinstance(self._rlsn, ResourceLimitsServerNotices): - raise Exception("Failed to find reference to ResourceLimitsServerNotices") + rlsn = list(server_notices_sender._server_notices)[1] + assert isinstance(rlsn, ResourceLimitsServerNotices) + self._rlsn = rlsn self.user_id = "@user_id:test" diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index 9f33afcca04d..9606ecc43b6b 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -120,6 +120,7 @@ def test_persisting_event_invalidates_cache(self) -> None: # Persist the event which should invalidate or prefill the # `have_seen_event` cache so we don't return stale values. persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None self.get_success( persistence.persist_event( event, diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index c070278db80f..a10e5fa8b147 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -389,6 +389,7 @@ def persist( """ persist_events_store = self.hs.get_datastores().persist_events + assert persist_events_store is not None for e in events: e.internal_metadata.stream_ordering = self._next_stream_ordering @@ -397,6 +398,7 @@ def persist( def _persist(txn: LoggingTransaction) -> None: # We need to persist the events to the events and state_events # tables. 
+ assert persist_events_store is not None persist_events_store._store_event_txn( txn, [(e, EventContext(self.hs.get_storage_controllers())) for e in events], @@ -540,7 +542,9 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: self.requester, events_and_context=[(event, context)] ) ) - state1 = set(self.get_success(context.get_current_state_ids()).values()) + state_ids1 = self.get_success(context.get_current_state_ids()) + assert state_ids1 is not None + state1 = set(state_ids1.values()) event, context = self.get_success( event_handler.create_event( @@ -560,7 +564,9 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: self.requester, events_and_context=[(event, context)] ) ) - state2 = set(self.get_success(context.get_current_state_ids()).values()) + state_ids2 = self.get_success(context.get_current_state_ids()) + assert state_ids2 is not None + state2 = set(state_ids2.values()) # Delete the chain cover info. diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 7fd3e01364e9..8fc7936ab094 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -54,6 +54,9 @@ class EventFederationWorkerStoreTestCase(tests.unittest.HomeserverTestCase): def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main + persist_events = hs.get_datastores().persist_events + assert persist_events is not None + self.persist_events = persist_events def test_get_prev_events_for_room(self) -> None: room_id = "@ROOM:local" @@ -226,7 +229,7 @@ def insert_event(txn: LoggingTransaction) -> None: }, ) - self.hs.datastores.persist_events._persist_event_auth_chain_txn( + self.persist_events._persist_event_auth_chain_txn( txn, [ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id])) @@ -445,7 +448,7 @@ def insert_event(txn: LoggingTransaction) -> None: ) # Insert all events apart from 'B' - self.hs.datastores.persist_events._persist_event_auth_chain_txn( + self.persist_events._persist_event_auth_chain_txn( txn, [ cast(EventBase, FakeEvent(event_id, room_id, auth_graph[event_id])) @@ -464,7 +467,7 @@ def insert_event(txn: LoggingTransaction) -> None: updatevalues={"has_auth_chain_index": False}, ) - self.hs.datastores.persist_events._persist_event_auth_chain_txn( + self.persist_events._persist_event_auth_chain_txn( txn, [cast(EventBase, FakeEvent("b", room_id, auth_graph["b"]))], ) diff --git a/tests/storage/test_events.py b/tests/storage/test_events.py index 05661a537dbd..e67dd0589db9 100644 --- a/tests/storage/test_events.py +++ b/tests/storage/test_events.py @@ -40,7 +40,9 @@ def prepare( self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer ) -> None: self.state = self.hs.get_state_handler() - self._persistence = self.hs.get_storage_controllers().persistence + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self._persistence = persistence self._state_storage_controller = self.hs.get_storage_controllers().state self.store = self.hs.get_datastores().main @@ -374,7 +376,9 @@ def prepare( self, reactor: MemoryReactor, clock: Clock, homeserver: HomeServer ) -> None: self.state = self.hs.get_state_handler() - self._persistence = self.hs.get_storage_controllers().persistence + persistence = self.hs.get_storage_controllers().persistence + assert persistence is not None + self._persistence = persistence self.store = self.hs.get_datastores().main def test_remote_user_rooms_cache_invalidated(self) -> None: 
diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index aa4b5bd3b18d..ba68171ad7fb 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -16,8 +16,6 @@ import signedjson.types import unpaddedbase64 -from twisted.internet.defer import Deferred - from synapse.storage.keys import FetchKeyResult import tests.unittest @@ -44,20 +42,26 @@ def test_get_server_verify_keys(self) -> None: key_id_1 = "ed25519:key1" key_id_2 = "ed25519:KEY_ID_2" - d = store.store_server_verify_keys( - "from_server", - 10, - [ - ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + self.get_success( + store.store_server_verify_keys( + "from_server", + 10, + [ + ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), + ], + ) ) - self.get_success(d) - d = store.get_server_verify_keys( - [("server1", key_id_1), ("server1", key_id_2), ("server1", "ed25519:key3")] + res = self.get_success( + store.get_server_verify_keys( + [ + ("server1", key_id_1), + ("server1", key_id_2), + ("server1", "ed25519:key3"), + ] + ) ) - res = self.get_success(d) self.assertEqual(len(res.keys()), 3) res1 = res[("server1", key_id_1)] @@ -82,18 +86,20 @@ def test_cache(self) -> None: key_id_1 = "ed25519:key1" key_id_2 = "ed25519:key2" - d = store.store_server_verify_keys( - "from_server", - 0, - [ - ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + self.get_success( + store.store_server_verify_keys( + "from_server", + 0, + [ + ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), + ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), + ], + ) ) - self.get_success(d) - d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - res = self.get_success(d) + res = self.get_success( + store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + ) self.assertEqual(len(res.keys()), 2) res1 = res[("srv1", key_id_1)] @@ -105,9 +111,7 @@ def test_cache(self) -> None: self.assertEqual(res2.valid_until_ts, 200) # we should be able to look up the same thing again without a db hit - res = store.get_server_verify_keys([("srv1", key_id_1)]) - if isinstance(res, Deferred): - res = self.successResultOf(res) + res = self.get_success(store.get_server_verify_keys([("srv1", key_id_1)])) self.assertEqual(len(res.keys()), 1) self.assertEqual(res[("srv1", key_id_1)].verify_key, KEY_1) @@ -119,8 +123,9 @@ def test_cache(self) -> None: ) self.get_success(d) - d = store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) - res = self.get_success(d) + res = self.get_success( + store.get_server_verify_keys([("srv1", key_id_1), ("srv1", key_id_2)]) + ) self.assertEqual(len(res.keys()), 2) res1 = res[("srv1", key_id_1)] diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index 010cc74c3164..d8f42c5d0575 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -112,7 +112,7 @@ def test_purge_room(self) -> None: self.room_id, "m.room.create", "" ) ) - self.assertIsNotNone(create_event) + assert create_event is not None # Purge everything before this topological token self.get_success( diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py index d8d84152dc5d..12c17f10731d 100644 --- a/tests/storage/test_receipts.py +++ b/tests/storage/test_receipts.py @@ -37,9 +37,9 @@ def prepare( self.store = homeserver.get_datastores().main self.room_creator = homeserver.get_room_creation_handler() - 
self.persist_event_storage_controller = ( - self.hs.get_storage_controllers().persistence - ) + persist_event_storage_controller = self.hs.get_storage_controllers().persistence + assert persist_event_storage_controller is not None + self.persist_event_storage_controller = persist_event_storage_controller # Create a test user self.ourUser = UserID.from_string(OUR_USER_ID) diff --git a/tests/storage/test_room_search.py b/tests/storage/test_room_search.py index 14d872514d21..f183c38477b0 100644 --- a/tests/storage/test_room_search.py +++ b/tests/storage/test_room_search.py @@ -119,7 +119,6 @@ def test_non_string(self) -> None: "content": {"msgtype": "m.text", "body": 2}, "room_id": room_id, "sender": user_id, - "depth": prev_event.depth + 1, "prev_events": prev_event_ids, "origin_server_ts": self.clock.time_msec(), } @@ -134,7 +133,7 @@ def test_non_string(self) -> None: prev_state_map, for_verification=False, ), - depth=event_dict["depth"], + depth=prev_event.depth + 1, ) ) diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index bc090ebce039..05dc4f64b847 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -16,7 +16,7 @@ from twisted.test.proto_helpers import MemoryReactor -from synapse.api.constants import EventTypes, RelationTypes +from synapse.api.constants import Direction, EventTypes, RelationTypes from synapse.api.filtering import Filter from synapse.rest import admin from synapse.rest.client import login, room @@ -128,7 +128,7 @@ def _filter_messages(self, filter: JsonDict) -> List[str]: room_id=self.room_id, from_key=self.from_token.room_key, to_key=None, - direction="f", + direction=Direction.FORWARDS, limit=10, event_filter=Filter(self.hs, filter), ) diff --git a/tests/storage/test_unsafe_locale.py b/tests/storage/test_unsafe_locale.py index ba53c2281869..19da8a9b09da 100644 --- a/tests/storage/test_unsafe_locale.py +++ b/tests/storage/test_unsafe_locale.py @@ -14,6 +14,7 @@ from unittest.mock import MagicMock, patch from synapse.storage.database import make_conn +from synapse.storage.engines import PostgresEngine from synapse.storage.engines._base import IncorrectDatabaseSetup from tests.unittest import HomeserverTestCase @@ -38,6 +39,7 @@ def test_unsafe_locale(self, mock_db_locale: MagicMock) -> None: def test_safe_locale(self) -> None: database = self.hs.get_datastores().databases[0] + assert isinstance(database.engine, PostgresEngine) db_conn = make_conn(database._database_config, database.engine, "test_unsafe") with db_conn.cursor() as txn: diff --git a/tests/test_federation.py b/tests/test_federation.py index ddb43c8c981a..82dfd88b9907 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -12,17 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional, Union +from typing import Collection, List, Optional, Union from unittest.mock import Mock -from twisted.internet.defer import succeed from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import FederationError -from synapse.api.room_versions import RoomVersions +from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.events import EventBase, make_event_from_dict from synapse.events.snapshot import EventContext from synapse.federation.federation_base import event_from_pdu_json +from synapse.handlers.device import DeviceListUpdater from synapse.http.types import QueryParams from synapse.logging.context import LoggingContext from synapse.server import HomeServer @@ -81,11 +81,15 @@ async def _check_event_auth( ) -> None: pass - federation_event_handler._check_event_auth = _check_event_auth + federation_event_handler._check_event_auth = _check_event_auth # type: ignore[assignment] self.client = self.hs.get_federation_client() - self.client._check_sigs_and_hash_for_pulled_events_and_fetch = ( - lambda dest, pdus, **k: succeed(pdus) - ) + + async def _check_sigs_and_hash_for_pulled_events_and_fetch( + dest: str, pdus: Collection[EventBase], room_version: RoomVersion + ) -> List[EventBase]: + return list(pdus) + + self.client._check_sigs_and_hash_for_pulled_events_and_fetch = _check_sigs_and_hash_for_pulled_events_and_fetch # type: ignore[assignment] # Send the join, it should return None (which is not an error) self.assertEqual( @@ -187,7 +191,7 @@ def query_user_devices( # Register the mock on the federation client. federation_client = self.hs.get_federation_client() - federation_client.query_user_devices = Mock(side_effect=query_user_devices) + federation_client.query_user_devices = Mock(side_effect=query_user_devices) # type: ignore[assignment] # Register a mock on the store so that the incoming update doesn't fail because # we don't share a room with the user. @@ -197,6 +201,7 @@ def query_user_devices( # Manually inject a fake device list update. We need this update to include at # least one prev_id so that the user's device list will need to be retried. device_list_updater = self.hs.get_device_handler().device_list_updater + assert isinstance(device_list_updater, DeviceListUpdater) self.get_success( device_list_updater.incoming_device_list_update( origin=remote_origin, @@ -236,7 +241,7 @@ def test_cross_signing_keys_retry(self) -> None: # Register mock device list retrieval on the federation client. federation_client = self.hs.get_federation_client() - federation_client.query_user_devices = Mock( + federation_client.query_user_devices = Mock( # type: ignore[assignment] return_value=make_awaitable( { "user_id": remote_user_id, @@ -269,16 +274,18 @@ def test_cross_signing_keys_retry(self) -> None: keys = self.get_success( self.store.get_e2e_cross_signing_keys_bulk(user_ids=[remote_user_id]), ) - self.assertTrue(remote_user_id in keys) + self.assertIn(remote_user_id, keys) + key = keys[remote_user_id] + assert key is not None # Check that the master key is the one returned by the mock. - master_key = keys[remote_user_id]["master"] + master_key = key["master"] self.assertEqual(len(master_key["keys"]), 1) self.assertTrue("ed25519:" + remote_master_key in master_key["keys"].keys()) self.assertTrue(remote_master_key in master_key["keys"].values()) # Check that the self-signing key is the one returned by the mock. 
- self_signing_key = keys[remote_user_id]["self_signing"] + self_signing_key = key["self_signing"] self.assertEqual(len(self_signing_key["keys"]), 1) self.assertTrue( "ed25519:" + remote_self_signing_key in self_signing_key["keys"].keys(), diff --git a/tests/test_phone_home.py b/tests/test_phone_home.py index cc1a98f1c4ab..3f899b0d9116 100644 --- a/tests/test_phone_home.py +++ b/tests/test_phone_home.py @@ -33,7 +33,7 @@ def test_performance_frozen_clock(self) -> None: If time doesn't move, don't error out. """ past_stats = [ - (self.hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF)) + (int(self.hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) ] stats: JsonDict = {} self.get_success(phone_stats_home(self.hs, stats, past_stats)) diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 36d6b37aa421..2801a950a85c 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -35,6 +35,8 @@ def setUp(self) -> None: self.event_creation_handler = self.hs.get_event_creation_handler() self.event_builder_factory = self.hs.get_event_builder_factory() self._storage_controllers = self.hs.get_storage_controllers() + assert self._storage_controllers.persistence is not None + self._persistence = self._storage_controllers.persistence self.get_success(create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")) @@ -179,9 +181,7 @@ def _inject_visibility(self, user_id: str, visibility: str) -> EventBase: self.event_creation_handler.create_new_client_event(builder) ) context = self.get_success(unpersisted_context.persist(event)) - self.get_success( - self._storage_controllers.persistence.persist_event(event, context) - ) + self.get_success(self._persistence.persist_event(event, context)) return event def _inject_room_member( @@ -208,9 +208,7 @@ def _inject_room_member( ) context = self.get_success(unpersisted_context.persist(event)) - self.get_success( - self._storage_controllers.persistence.persist_event(event, context) - ) + self.get_success(self._persistence.persist_event(event, context)) return event def _inject_message( @@ -233,9 +231,7 @@ def _inject_message( ) context = self.get_success(unpersisted_context.persist(event)) - self.get_success( - self._storage_controllers.persistence.persist_event(event, context) - ) + self.get_success(self._persistence.persist_event(event, context)) return event def _inject_outlier(self) -> EventBase: @@ -253,7 +249,7 @@ def _inject_outlier(self) -> EventBase: event = self.get_success(builder.build(prev_event_ids=[], auth_event_ids=[])) event.internal_metadata.outlier = True self.get_success( - self._storage_controllers.persistence.persist_event( + self._persistence.persist_event( event, EventContext.for_outlier(self._storage_controllers) ) ) diff --git a/tests/unittest.py b/tests/unittest.py index 68e59a88dc0f..c1cb5933faed 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -361,7 +361,9 @@ def wait_for_background_updates(self) -> None: store.db_pool.updates.do_next_background_update(False), by=0.1 ) - def make_homeserver(self, reactor: ThreadedMemoryReactorClock, clock: Clock): + def make_homeserver( + self, reactor: ThreadedMemoryReactorClock, clock: Clock + ) -> HomeServer: """ Make and return a homeserver. 
diff --git a/tests/util/test_retryutils.py b/tests/util/test_retryutils.py index 9529ee53c8a2..5f8f4e76b544 100644 --- a/tests/util/test_retryutils.py +++ b/tests/util/test_retryutils.py @@ -54,6 +54,7 @@ def test_limiter(self) -> None: self.pump() new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + assert new_timings is not None self.assertEqual(new_timings.failure_ts, failure_ts) self.assertEqual(new_timings.retry_last_ts, failure_ts) self.assertEqual(new_timings.retry_interval, MIN_RETRY_INTERVAL) @@ -82,6 +83,7 @@ def test_limiter(self) -> None: self.pump() new_timings = self.get_success(store.get_destination_retry_timings("test_dest")) + assert new_timings is not None self.assertEqual(new_timings.failure_ts, failure_ts) self.assertEqual(new_timings.retry_last_ts, retry_ts) self.assertGreaterEqual( From 06ba71083eefbe1fd9a8eeed10e541dd7b52796f Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 14 Feb 2023 23:42:29 +0000 Subject: [PATCH 195/344] Fix order of partial state tables when purging (#15068) * Fix order of partial state tables when purging `partial_state_rooms` has an FK on `events` pointing to the join event we get from `/send_join`, so we must delete from that table before deleting from `events`. **NB:** It would be nice to cancel any resync processes for the room being purged. We do not do this at present. To do so reliably we'd need an internal HTTP "replication" endpoint, because the worker doing the resync process may be different to that handling the purge request. The first time the resync process tries to write data after the deletion it will fail because we have deleted necessary data e.g. auth events. AFAICS it will not retry the resync, so the only downside to not cancelling the resync is a scary-looking traceback. (This is presumably extremely race-sensitive.) * Changelog * admist(?) -> between * Warn about a race * Fix typo, thanks Sean Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --------- Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/15068.bugfix | 1 + synapse/handlers/federation.py | 5 +++++ synapse/storage/databases/main/purge_events.py | 6 ++++-- 3 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15068.bugfix diff --git a/changelog.d/15068.bugfix b/changelog.d/15068.bugfix new file mode 100644 index 000000000000..f09ffa2877bd --- /dev/null +++ b/changelog.d/15068.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 08727e485741..1d0f6bcd6f8b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1880,6 +1880,11 @@ async def _sync_partial_state_room( logger.info("Updating current state for %s", room_id) # TODO(faster_joins): notify workers in notify_room_un_partial_stated # https://github.com/matrix-org/synapse/issues/12994 + # + # NB: there's a potential race here. If room is purged just before we + # call this, we _might_ end up inserting rows into current_state_events. + # (The logic is hard to chase through.) We think this is fine, but if + # not the HS admin should purge the room again. 
await self.state_handler.update_current_state(room_id) logger.info("Handling any pending device list updates") diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 9213ce0b5abb..9c41d01e1317 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -420,12 +420,14 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: "event_push_actions", "event_search", "event_failed_pull_attempts", + # Note: the partial state tables have foreign keys between each other, and to + # `events` and `rooms`. We need to delete from them in the right order. "partial_state_events", + "partial_state_rooms_servers", + "partial_state_rooms", "events", "federation_inbound_events_staging", "local_current_membership", - "partial_state_rooms_servers", - "partial_state_rooms", "receipts_graph", "receipts_linearized", "room_aliases", From 5febf88b6c5194582f427142dc0850625547c0d9 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 15 Feb 2023 11:47:57 +0000 Subject: [PATCH 196/344] Update the error code for duplicate annotation (#15075) --- changelog.d/15075.feature | 2 ++ synapse/api/errors.py | 4 ++++ synapse/handlers/message.py | 6 +++++- 3 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15075.feature diff --git a/changelog.d/15075.feature b/changelog.d/15075.feature new file mode 100644 index 000000000000..d25a7567a4fa --- /dev/null +++ b/changelog.d/15075.feature @@ -0,0 +1,2 @@ +Update the error code returned when user sends a duplicate annotation. + diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 9235ce653638..e1737de59b51 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -108,6 +108,10 @@ class Codes(str, Enum): USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL" + # Attempt to send a second annotation with the same event type & annotation key + # MSC2677 + DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION" + class CodeMessageException(RuntimeError): """An exception with integer code and message string attributes. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 8f5b658d9d28..aa90d0000d40 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1337,7 +1337,11 @@ async def _validate_event_relation(self, event: EventBase) -> None: relation.parent_id, event.type, aggregation_key, event.sender ) if already_exists: - raise SynapseError(400, "Can't send same reaction twice") + raise SynapseError( + 400, + "Can't send same reaction twice", + errcode=Codes.DUPLICATE_ANNOTATION, + ) # Don't attempt to start a thread if the parent event is a relation. elif relation.rel_type == RelationTypes.THREAD: From 27a3a72a50cb24f25e48fad1e6e79aba2cd1bea2 Mon Sep 17 00:00:00 2001 From: 999lakhisidhu <42063995+999lakhisidhu@users.noreply.github.com> Date: Wed, 15 Feb 2023 16:39:31 +0400 Subject: [PATCH 197/344] Support for selecting the Redis logical database. (#15034) Note that this is only used for key-value store (cached values) and not for the pub/sub replication used by Synapse. 
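As a rough illustration (not part of this patch), one way to confirm the option is taking effect is to look for Synapse's cached keys in the configured logical database. The snippet below is a hedged sketch: it assumes a local Redis, a deployment configured with `dbid: 2`, and the third-party `redis` Python client, none of which are Synapse dependencies.

```python
# Spot-check that Synapse's external-cache keys land in the logical database
# selected by `dbid` (assumed here to be 2) instead of the default db 0.
import redis

cache_db = redis.Redis(host="localhost", port=6379, db=2)
print(cache_db.dbsize())        # how many keys Synapse has cached in this db
print(cache_db.keys("*")[:10])  # a small sample of those keys
```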
--- changelog.d/15034.feature | 1 + contrib/docker_compose_workers/README.md | 1 + docs/usage/configuration/config_documentation.md | 4 ++++ synapse/config/redis.py | 1 + synapse/server.py | 1 + 5 files changed, 8 insertions(+) create mode 100644 changelog.d/15034.feature diff --git a/changelog.d/15034.feature b/changelog.d/15034.feature new file mode 100644 index 000000000000..34f320da9200 --- /dev/null +++ b/changelog.d/15034.feature @@ -0,0 +1 @@ +Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md index bdd3dd32e07f..d3cdfe5614bb 100644 --- a/contrib/docker_compose_workers/README.md +++ b/contrib/docker_compose_workers/README.md @@ -68,6 +68,7 @@ redis: enabled: true host: redis port: 6379 + # dbid: # password: ``` diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 2883f76a26cd..75483bfb125a 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3927,6 +3927,9 @@ This setting has the following sub-options: * `host` and `port`: Optional host and port to use to connect to redis. Defaults to localhost and 6379 * `password`: Optional password if configured on the Redis instance. +* `dbid`: Optional redis dbid if needs to connect to specific redis logical db. + + _Added in Synapse 1.78.0._ Example configuration: ```yaml @@ -3935,6 +3938,7 @@ redis: host: localhost port: 6379 password: + dbid: ``` --- ## Individual worker configuration diff --git a/synapse/config/redis.py b/synapse/config/redis.py index b42dd2e93a39..e6a75be43429 100644 --- a/synapse/config/redis.py +++ b/synapse/config/redis.py @@ -33,4 +33,5 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.redis_host = redis_config.get("host", "localhost") self.redis_port = redis_config.get("port", 6379) + self.redis_dbid = redis_config.get("dbid", None) self.redis_password = redis_config.get("password") diff --git a/synapse/server.py b/synapse/server.py index efc6b5f895da..e5a347524763 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -827,6 +827,7 @@ def get_outbound_redis_connection(self) -> "ConnectionHandler": hs=self, host=self.config.redis.redis_host, port=self.config.redis.redis_port, + dbid=self.config.redis.redis_dbid, password=self.config.redis.redis_password, reconnect=True, ) From 39795b3a4e6af3638780601225ea78bb5fc45e0a Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 15 Feb 2023 13:51:37 +0000 Subject: [PATCH 198/344] Make it easier to use DataGrip w/ Synapse's schema (#14982) Also tweak the schema dump script: - add a note explaining myself how to use it -Explicitly call `poetry run`, because not everyone uses direnv :( --- changelog.d/14982.misc | 1 + contrib/datagrip/README.md | 28 ++++++++++++++++++ contrib/datagrip/common.sql | 1 + contrib/datagrip/datagrip-aware-of-schema.png | Bin 0 -> 13610 bytes contrib/datagrip/main.sql | 1 + contrib/datagrip/schema_version.sql | 1 + contrib/datagrip/state.sql | 1 + scripts-dev/make_full_schema.sh | 20 +++++++++++-- 8 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 changelog.d/14982.misc create mode 100644 contrib/datagrip/README.md create mode 120000 contrib/datagrip/common.sql create mode 100644 contrib/datagrip/datagrip-aware-of-schema.png create mode 120000 contrib/datagrip/main.sql create mode 120000 
contrib/datagrip/schema_version.sql create mode 120000 contrib/datagrip/state.sql diff --git a/changelog.d/14982.misc b/changelog.d/14982.misc new file mode 100644 index 000000000000..9aaa7ce264ed --- /dev/null +++ b/changelog.d/14982.misc @@ -0,0 +1 @@ +Add a schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. diff --git a/contrib/datagrip/README.md b/contrib/datagrip/README.md new file mode 100644 index 000000000000..bbe4f3a5a323 --- /dev/null +++ b/contrib/datagrip/README.md @@ -0,0 +1,28 @@ +# Schema symlinks + +This directory contains symlinks to the latest dump of the postgres full schema. This is useful to have, as it allows IDEs to understand our schema and provide autocomplete, linters, inspections, etc. + +In particular, the DataGrip functionality in IntelliJ's products seems to only consider files called `*.sql` when defining a schema from DDL; `*.sql.postgres` will be ignored. To get around this we symlink those files to ones ending in `.sql`. We've chosen to ignore the `.sql.sqlite` schema dumps here, as they're not intended for production use (and are much quicker to test against). + +## Example +![](datagrip-aware-of-schema.png) + +## Caveats + +- Doesn't include temporary tables created ad-hoc by Synapse. +- Postgres only. IDEs will likely be confused by SQLite-specific queries. +- Will not include migrations created after the latest schema dump. +- Symlinks might confuse checkouts on Windows systems. + +## Instructions + +### Jetbrains IDEs with DataGrip plugin + +- View -> Tool Windows -> Database +- `+` Icon -> DDL Data Source +- Pick a name, e.g. `Synapse schema dump` +- Under sources, click `+`. +- Add an entry with Path pointing to this directory, and dialect set to PostgreSQL. +- OK, and OK. +- IDE should now be aware of the schema. +- Try control-clicking on a table name in a bit of SQL e.g. in `_get_forgotten_rooms_for_user_txn`. 
\ No newline at end of file
diff --git a/contrib/datagrip/common.sql b/contrib/datagrip/common.sql
new file mode 120000
index 000000000000..28c5aa8a1b8b
--- /dev/null
+++ b/contrib/datagrip/common.sql
@@ -0,0 +1 @@
+../../synapse/storage/schema/common/full_schemas/72/full.sql.postgres
\ No newline at end of file
diff --git a/contrib/datagrip/datagrip-aware-of-schema.png b/contrib/datagrip/datagrip-aware-of-schema.png
new file mode 100644
index 0000000000000000000000000000000000000000..653642da9130d192dcec00eff13cc2698f311601
Binary files /dev/null and b/contrib/datagrip/datagrip-aware-of-schema.png differ (13610-byte PNG screenshot omitted)
diff --git a/contrib/datagrip/main.sql b/contrib/datagrip/main.sql
new file mode 120000
index 000000000000..eec0a2fb6dc3
--- /dev/null
+++ b/contrib/datagrip/main.sql
@@ -0,0 +1 @@
+../../synapse/storage/schema/main/full_schemas/72/full.sql.postgres
\ No newline at end of file
diff --git a/contrib/datagrip/schema_version.sql b/contrib/datagrip/schema_version.sql
new file mode 120000
index 000000000000..e1b0985d7473
--- /dev/null
+++ b/contrib/datagrip/schema_version.sql
@@ -0,0 +1 @@
+../../synapse/storage/schema/common/schema_version.sql
\ No newline at end of file
diff --git a/contrib/datagrip/state.sql b/contrib/datagrip/state.sql
new file mode 120000
index 000000000000..4de4fbbdf7f0
--- /dev/null
+++ b/contrib/datagrip/state.sql
@@ -0,0 +1 @@
+../../synapse/storage/schema/state/full_schemas/72/full.sql.postgres
\ No newline at end of file
diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh
index e2bc1640bb2a..473f54772a8a 100755
--- a/scripts-dev/make_full_schema.sh
+++ b/scripts-dev/make_full_schema.sh
@@ -19,7 +19,8 @@ usage() {
   echo "-c"
   echo "  CI mode. 
Prints every command that the script runs." echo "-o " - echo " Directory to output full schema files to." + echo " Directory to output full schema files to. You probably want to use" + echo " '-o synapse/storage/schema'" echo "-n " echo " Schema number for the new snapshot. Used to set the location of files within " echo " the output directory, mimicking that of synapse/storage/schemas." @@ -27,6 +28,11 @@ usage() { echo "-h" echo " Display this help text." echo "" + echo "" + echo "You probably want to invoke this with something like" + echo " docker run --rm -e POSTGRES_PASSWORD=postgres -e POSTGRES_USER=postgres -e POSTGRES_DB=synapse -p 5432:5432 postgres:11-alpine" + echo " echo postgres | scripts-dev/make_full_schema.sh -p postgres -n MY_SCHEMA_NUMBER -o synapse/storage/schema" + echo "" echo " NB: make sure to run this against the *oldest* supported version of postgres," echo " or else pg_dump might output non-backwards-compatible syntax." } @@ -189,7 +195,7 @@ python -m synapse.app.homeserver --generate-keys -c "$SQLITE_CONFIG" # Make sure the SQLite3 database is using the latest schema and has no pending background update. echo "Running db background jobs..." -synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates +poetry run python synapse/_scripts/update_synapse_database.py --database-config "$SQLITE_CONFIG" --run-background-updates # Create the PostgreSQL database. echo "Creating postgres databases..." @@ -198,7 +204,7 @@ createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_MAIN_DB_NAM createdb --lc-collate=C --lc-ctype=C --template=template0 "$POSTGRES_STATE_DB_NAME" echo "Running db background jobs..." -synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates +poetry run python synapse/_scripts/update_synapse_database.py --database-config "$POSTGRES_CONFIG" --run-background-updates echo "Dropping unwanted db tables..." @@ -293,4 +299,12 @@ pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owne pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" +if [[ "$OUTPUT_DIR" == *synapse/storage/schema ]]; then + echo "Updating contrib/datagrip symlinks..." + ln -sf "../../synapse/storage/schema/common/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/common.sql" + ln -sf "../../synapse/storage/schema/main/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/main.sql" + ln -sf "../../synapse/storage/schema/state/full_schemas/$SCHEMA_NUMBER/full.sql.postgres" "contrib/datagrip/state.sql" +else + echo "Not updating contrib/datagrip symlinks (unknown output directory)" +fi echo "Done! Files dumped to: $OUTPUT_DIR" From 3ad817bfe561e0b7ddcd8398a76a4a4d3d789138 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Wed, 15 Feb 2023 13:59:06 +0000 Subject: [PATCH 199/344] Fix federated joins when the first server in the list is not in the room (#15074) Previously we would give up upon receiving a 404 from the first server, instead of trying the rest of the servers in the list. 
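To make the new behaviour concrete, here is an illustrative sketch (not code from this patch) of the failover decision that `_try_destination_list` now applies: with `M_NOT_FOUND` included in `failover_errcodes`, a 404 from one server means we move on to the next candidate instead of aborting the join.

```python
# Simplified model of the failover check after this change.
def should_try_next_server(status: int, errcode: str, failover_errcodes: set) -> bool:
    if 500 <= status < 600:
        return True
    # Previously only exactly-400 responses were considered; now any 4xx whose
    # errcode is in the failover set (including M_NOT_FOUND) triggers failover.
    if 400 <= status < 500 and errcode in failover_errcodes:
        return True
    return False

assert should_try_next_server(404, "M_NOT_FOUND", {"M_NOT_FOUND"})
assert not should_try_next_server(403, "M_FORBIDDEN", {"M_NOT_FOUND"})
```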
Signed-off-by: Sean Quah --- changelog.d/15074.bugfix | 1 + synapse/federation/federation_client.py | 11 +++++------ 2 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15074.bugfix diff --git a/changelog.d/15074.bugfix b/changelog.d/15074.bugfix new file mode 100644 index 000000000000..d1ceb4f4c89b --- /dev/null +++ b/changelog.d/15074.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 0ac85a3be717..7d04560dca78 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -884,7 +884,7 @@ async def _try_destination_list( if 500 <= e.code < 600: failover = True - elif e.code == 400 and synapse_error.errcode in failover_errcodes: + elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes: failover = True elif failover_on_unknown_endpoint and self._is_unknown_endpoint( @@ -999,14 +999,13 @@ async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: return destination, ev, room_version + failover_errcodes = {Codes.NOT_FOUND} # MSC3083 defines additional error codes for room joins. Unfortunately # we do not yet know the room version, assume these will only be returned # by valid room versions. - failover_errcodes = ( - (Codes.UNABLE_AUTHORISE_JOIN, Codes.UNABLE_TO_GRANT_JOIN) - if membership == Membership.JOIN - else None - ) + if membership == Membership.JOIN: + failover_errcodes.add(Codes.UNABLE_AUTHORISE_JOIN) + failover_errcodes.add(Codes.UNABLE_TO_GRANT_JOIN) return await self._try_destination_list( "make_" + membership, From d1efc479252f71f196e5a339af999d2c632bc294 Mon Sep 17 00:00:00 2001 From: saddfox Date: Wed, 15 Feb 2023 20:51:58 +0100 Subject: [PATCH 200/344] Fix a mistake in registration_shared_secret_path docs (#15078) * fix a typo in registration_shared_secret_path docs Signed-off-by: Filip Rutar * changelog --- changelog.d/15078.doc | 1 + docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15078.doc diff --git a/changelog.d/15078.doc b/changelog.d/15078.doc new file mode 100644 index 000000000000..641f9a993b47 --- /dev/null +++ b/changelog.d/15078.doc @@ -0,0 +1 @@ +Fix a mistake in registration_shared_secret_path docs. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 75483bfb125a..58c695568972 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2232,7 +2232,7 @@ key on startup and store it in this file. Example configuration: ```yaml -registration_shared_secret_file: /path/to/secrets/file +registration_shared_secret_path: /path/to/secrets/file ``` _Added in Synapse 1.67.0._ From 979f237b282cbdaab8d74cc4c7473117093d63d9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 16 Feb 2023 09:51:22 -0500 Subject: [PATCH 201/344] Update intentional mentions (MSC3952) to depend on `exact_event_match` (MSC3758). (#15037) This replaces the specific `is_room_mention` push rule condition used in MSC3952 with the generic `exact_event_match` push rule condition from MSC3758. No functionality changes due to this. 
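To illustrate what the generic condition checks, here is a simplified, self-contained sketch (not Synapse's actual `_flatten_dict` or the Rust evaluator): event content is flattened into dotted keys, and the condition matches only when the value at `content.org.matrix.msc3952.mentions.room` is exactly `true`.

```python
# Illustrative sketch only; simplified flattening with no key escaping.
from typing import Any, Dict


def flatten(d: Dict[str, Any], prefix: str = "") -> Dict[str, Any]:
    """Flatten nested dicts into dotted keys."""
    out: Dict[str, Any] = {}
    for k, v in d.items():
        key = f"{prefix}{k}"
        if isinstance(v, dict):
            out.update(flatten(v, f"{key}."))
        else:
            out[key] = v
    return out


def exact_event_match(flattened: Dict[str, Any], key: str, value: Any) -> bool:
    """MSC3758-style check: the key must be present and exactly equal."""
    return key in flattened and flattened[key] == value


event = {
    "content": {
        "body": "@room meeting in five minutes",
        "org.matrix.msc3952.mentions": {"room": True},
    }
}

# Matches: the room-mention flag is present and is exactly `true`.
assert exact_event_match(
    flatten(event), "content.org.matrix.msc3952.mentions.room", True
)

# Does not match when the flag is a truthy but non-boolean value.
event["content"]["org.matrix.msc3952.mentions"]["room"] = "yes"
assert not exact_event_match(
    flatten(event), "content.org.matrix.msc3952.mentions.room", True
)
```

This is why the bespoke `is_room_mention` condition and the `room_mention` plumbing through the evaluator can be removed: the same behaviour falls out of the generic condition applied to the flattened key.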
--- changelog.d/15037.misc | 1 + rust/benches/evaluator.rs | 4 ---- rust/src/push/base_rules.rs | 7 +++++-- rust/src/push/evaluator.rs | 7 ------- rust/src/push/mod.rs | 13 ------------ stubs/synapse/synapse_rust/push.pyi | 1 - synapse/config/experimental.py | 7 ++++--- synapse/push/bulk_push_rule_evaluator.py | 4 ---- tests/push/test_bulk_push_rule_evaluator.py | 18 ++++++++++++++-- tests/push/test_push_rule_evaluator.py | 23 --------------------- 10 files changed, 26 insertions(+), 59 deletions(-) create mode 100644 changelog.d/15037.misc diff --git a/changelog.d/15037.misc b/changelog.d/15037.misc new file mode 100644 index 000000000000..fabfe77d35af --- /dev/null +++ b/changelog.d/15037.misc @@ -0,0 +1 @@ +Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 8213dfd9eab6..efd19a216541 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -45,7 +45,6 @@ fn bench_match_exact(b: &mut Bencher) { flattened_keys, false, BTreeSet::new(), - false, 10, Some(0), Default::default(), @@ -95,7 +94,6 @@ fn bench_match_word(b: &mut Bencher) { flattened_keys, false, BTreeSet::new(), - false, 10, Some(0), Default::default(), @@ -145,7 +143,6 @@ fn bench_match_word_miss(b: &mut Bencher) { flattened_keys, false, BTreeSet::new(), - false, 10, Some(0), Default::default(), @@ -195,7 +192,6 @@ fn bench_eval_message(b: &mut Bencher) { flattened_keys, false, BTreeSet::new(), - false, 10, Some(0), Default::default(), diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index dcbca340fece..4a62b9696f9d 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -21,13 +21,13 @@ use lazy_static::lazy_static; use serde_json::Value; use super::KnownCondition; -use crate::push::Action; use crate::push::Condition; use crate::push::EventMatchCondition; use crate::push::PushRule; use crate::push::RelatedEventMatchCondition; use crate::push::SetTweak; use crate::push::TweakValue; +use crate::push::{Action, ExactEventMatchCondition, SimpleJsonValue}; const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak { set_tweak: Cow::Borrowed("highlight"), @@ -168,7 +168,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"), priority_class: 5, conditions: Cow::Borrowed(&[ - Condition::Known(KnownCondition::IsRoomMention), + Condition::Known(KnownCondition::ExactEventMatch(ExactEventMatchCondition { + key: Cow::Borrowed("content.org.matrix.msc3952.mentions.room"), + value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), + })), Condition::Known(KnownCondition::SenderNotificationPermission { key: Cow::Borrowed("room"), }), diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 2eaa06ad7614..55551ecb56a7 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -73,8 +73,6 @@ pub struct PushRuleEvaluator { has_mentions: bool, /// The user mentions that were part of the message. user_mentions: BTreeSet, - /// True if the message is a room message. - room_mention: bool, /// The number of users in the room. 
room_member_count: u64, @@ -116,7 +114,6 @@ impl PushRuleEvaluator { flattened_keys: BTreeMap, has_mentions: bool, user_mentions: BTreeSet, - room_mention: bool, room_member_count: u64, sender_power_level: Option, notification_power_levels: BTreeMap, @@ -137,7 +134,6 @@ impl PushRuleEvaluator { body, has_mentions, user_mentions, - room_mention, room_member_count, notification_power_levels, sender_power_level, @@ -279,7 +275,6 @@ impl PushRuleEvaluator { false } } - KnownCondition::IsRoomMention => self.room_mention, KnownCondition::ContainsDisplayName => { if let Some(dn) = display_name { if !dn.is_empty() { @@ -529,7 +524,6 @@ fn push_rule_evaluator() { flattened_keys, false, BTreeSet::new(), - false, 10, Some(0), BTreeMap::new(), @@ -562,7 +556,6 @@ fn test_requires_room_version_supports_condition() { flattened_keys, false, BTreeSet::new(), - false, 10, Some(0), BTreeMap::new(), diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 253b5f367c67..fdd2b2c1439b 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -336,8 +336,6 @@ pub enum KnownCondition { ExactEventPropertyContains(ExactEventMatchCondition), #[serde(rename = "org.matrix.msc3952.is_user_mention")] IsUserMention, - #[serde(rename = "org.matrix.msc3952.is_room_mention")] - IsRoomMention, ContainsDisplayName, RoomMemberCount { #[serde(skip_serializing_if = "Option::is_none")] @@ -667,17 +665,6 @@ fn test_deserialize_unstable_msc3952_user_condition() { )); } -#[test] -fn test_deserialize_unstable_msc3952_room_condition() { - let json = r#"{"kind":"org.matrix.msc3952.is_room_mention"}"#; - - let condition: Condition = serde_json::from_str(json).unwrap(); - assert!(matches!( - condition, - Condition::Known(KnownCondition::IsRoomMention) - )); -} - #[test] fn test_deserialize_custom_condition() { let json = r#"{"kind":"custom_tag"}"#; diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index 7b33c30cc919..a8f0ed2435c5 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -59,7 +59,6 @@ class PushRuleEvaluator: flattened_keys: Mapping[str, JsonValue], has_mentions: bool, user_mentions: Set[str], - room_mention: bool, room_member_count: int, sender_power_level: Optional[int], notification_power_levels: Mapping[str, int], diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 1d294f879866..54c91953e106 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -179,9 +179,10 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "msc3783_escape_event_match_key", False ) - # MSC3952: Intentional mentions - self.msc3952_intentional_mentions = experimental.get( - "msc3952_intentional_mentions", False + # MSC3952: Intentional mentions, this depends on MSC3758. + self.msc3952_intentional_mentions = ( + experimental.get("msc3952_intentional_mentions", False) + and self.msc3758_exact_event_match ) # MSC3959: Do not generate notifications for edits. 
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 2e917c90c4ae..5fc38431babe 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -400,7 +400,6 @@ async def _action_for_event_by_user( mentions = event.content.get(EventContentFields.MSC3952_MENTIONS) has_mentions = self._intentional_mentions_enabled and isinstance(mentions, dict) user_mentions: Set[str] = set() - room_mention = False if has_mentions: # mypy seems to have lost the type even though it must be a dict here. assert isinstance(mentions, dict) @@ -410,8 +409,6 @@ async def _action_for_event_by_user( user_mentions = set( filter(lambda item: isinstance(item, str), user_mentions_raw) ) - # Room mention is only true if the value is exactly true. - room_mention = mentions.get("room") is True evaluator = PushRuleEvaluator( _flatten_dict( @@ -420,7 +417,6 @@ async def _action_for_event_by_user( ), has_mentions, user_mentions, - room_mention, room_member_count, sender_power_level, notification_levels, diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 7567756135b7..199e3d7b7007 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -227,7 +227,14 @@ def _create_and_process( ) return len(result) > 0 - @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) + @override_config( + { + "experimental_features": { + "msc3758_exact_event_match": True, + "msc3952_intentional_mentions": True, + } + } + ) def test_user_mentions(self) -> None: """Test the behavior of an event which includes invalid user mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) @@ -323,7 +330,14 @@ def test_user_mentions(self) -> None: ) ) - @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) + @override_config( + { + "experimental_features": { + "msc3758_exact_event_match": True, + "msc3952_intentional_mentions": True, + } + } + ) def test_room_mentions(self) -> None: """Test the behavior of an event which includes invalid room mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 0554d247bce5..d320a12f968d 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -149,7 +149,6 @@ def _get_evaluator( *, has_mentions: bool = False, user_mentions: Optional[Set[str]] = None, - room_mention: bool = False, related_events: Optional[JsonDict] = None, ) -> PushRuleEvaluator: event = FrozenEvent( @@ -170,7 +169,6 @@ def _get_evaluator( _flatten_dict(event), has_mentions, user_mentions or set(), - room_mention, room_member_count, sender_power_level, cast(Dict[str, int], power_levels.get("notifications", {})), @@ -232,27 +230,6 @@ def test_user_mentions(self) -> None: # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions # since the BulkPushRuleEvaluator is what handles data sanitisation. - def test_room_mentions(self) -> None: - """Check for room mentions.""" - condition = {"kind": "org.matrix.msc3952.is_room_mention"} - - # No room mention shouldn't match. - evaluator = self._get_evaluator({}, has_mentions=True) - self.assertFalse(evaluator.matches(condition, None, None)) - - # Room mention should match. 
- evaluator = self._get_evaluator({}, has_mentions=True, room_mention=True) - self.assertTrue(evaluator.matches(condition, None, None)) - - # A room mention and user mention is valid. - evaluator = self._get_evaluator( - {}, has_mentions=True, user_mentions={"@another:test"}, room_mention=True - ) - self.assertTrue(evaluator.matches(condition, None, None)) - - # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions - # since the BulkPushRuleEvaluator is what handles data sanitisation. - def _assert_matches( self, condition: JsonDict, content: JsonMapping, msg: Optional[str] = None ) -> None: From ffc2ee521d26f5b842df7902ade5de7a538e602d Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 16 Feb 2023 16:09:11 +0000 Subject: [PATCH 202/344] Use mypy 1.0 (#15052) * Update mypy and mypy-zope * Remove unused ignores These used to suppress ``` synapse/storage/engines/__init__.py:28: error: "__new__" must return a class instance (got "NoReturn") [misc] ``` and ``` synapse/http/matrixfederationclient.py:1270: error: "BaseException" has no attribute "reasons" [attr-defined] ``` (note that we check `hasattr(e, "reasons")` above) * Avoid empty body warnings, sometimes by marking methods as abstract E.g. ``` tests/handlers/test_register.py:58: error: Missing return statement [empty-body] tests/handlers/test_register.py:108: error: Missing return statement [empty-body] ``` * Suppress false positive about `JaegerConfig` Complaint was ``` synapse/logging/opentracing.py:450: error: Function "Type[Config]" could always be true in boolean context [truthy-function] ``` * Fix not calling `is_state()` Oops! ``` tests/rest/client/test_third_party_rules.py:428: error: Function "Callable[[], bool]" could always be true in boolean context [truthy-function] ``` * Suppress false positives from ParamSpecs ```` synapse/logging/opentracing.py:971: error: Argument 2 to "_custom_sync_async_decorator" has incompatible type "Callable[[Arg(Callable[P, R], 'func'), **P], _GeneratorContextManager[None]]"; expected "Callable[[Callable[P, R], **P], _GeneratorContextManager[None]]" [arg-type] synapse/logging/opentracing.py:1017: error: Argument 2 to "_custom_sync_async_decorator" has incompatible type "Callable[[Arg(Callable[P, R], 'func'), **P], _GeneratorContextManager[None]]"; expected "Callable[[Callable[P, R], **P], _GeneratorContextManager[None]]" [arg-type] ```` * Drive-by improvement to `wrapping_logic` annotation * Workaround false "unreachable" positives See https://github.com/Shoobx/mypy-zope/issues/91 ``` tests/http/test_proxyagent.py:626: error: Statement is unreachable [unreachable] tests/http/test_proxyagent.py:762: error: Statement is unreachable [unreachable] tests/http/test_proxyagent.py:826: error: Statement is unreachable [unreachable] tests/http/test_proxyagent.py:838: error: Statement is unreachable [unreachable] tests/http/test_proxyagent.py:845: error: Statement is unreachable [unreachable] tests/http/federation/test_matrix_federation_agent.py:151: error: Statement is unreachable [unreachable] tests/http/federation/test_matrix_federation_agent.py:452: error: Statement is unreachable [unreachable] tests/logging/test_remote_handler.py:60: error: Statement is unreachable [unreachable] tests/logging/test_remote_handler.py:93: error: Statement is unreachable [unreachable] tests/logging/test_remote_handler.py:127: error: Statement is unreachable [unreachable] tests/logging/test_remote_handler.py:152: error: Statement is unreachable 
[unreachable] ``` * Changelog * Tweak DBAPI2 Protocol to be accepted by mypy 1.0 Some extra context in: - https://github.com/matrix-org/python-canonicaljson/pull/57 - https://github.com/python/mypy/issues/6002 - https://mypy.readthedocs.io/en/latest/common_issues.html#covariant-subtyping-of-mutable-protocol-members-is-rejected * Pull in updated canonicaljson lib so the protocol check just works * Improve comments in opentracing I tried to workaround the ignores but found it too much trouble. I think the corresponding issue is https://github.com/python/mypy/issues/12909. The mypy repo has a PR claiming to fix this (https://github.com/python/mypy/pull/14677) which might mean this gets resolved soon? * Better annotation for INTERACTIVE_AUTH_CHECKERS * Drive-by AUTH_TYPE annotation, to remove an ignore --- changelog.d/15052.misc | 1 + poetry.lock | 69 ++++++++--------- synapse/handlers/auth.py | 2 +- synapse/handlers/ui_auth/checkers.py | 18 ++++- synapse/http/matrixfederationclient.py | 2 +- synapse/logging/opentracing.py | 24 ++++-- synapse/rest/media/v1/_base.py | 9 ++- synapse/storage/engines/__init__.py | 4 +- synapse/storage/types.py | 74 ++++++++++++++++--- synapse/streams/__init__.py | 7 +- tests/handlers/test_register.py | 4 +- .../test_matrix_federation_agent.py | 11 +-- tests/http/test_proxyagent.py | 40 +++++----- tests/logging/test_remote_handler.py | 17 +++-- tests/rest/client/test_auth.py | 3 + tests/rest/client/test_third_party_rules.py | 2 +- tests/utils.py | 26 ++++++- 17 files changed, 209 insertions(+), 104 deletions(-) create mode 100644 changelog.d/15052.misc diff --git a/changelog.d/15052.misc b/changelog.d/15052.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15052.misc @@ -0,0 +1 @@ +Improve type hints. 
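One of the less obvious parts of this patch is the `synapse/storage/types.py` change further down: mypy rejects covariant subtyping of mutable protocol members (see the links above), so to keep the DBAPI2 protocol usable under mypy 1.0 the exception types are exposed as read-only properties rather than plain attributes. A standalone sketch of the distinction (toy names, not the real `DBAPI2Module`):

```python
# Illustrative sketch only; not Synapse code.
from typing import Protocol, Type


class MutableModule(Protocol):
    # A plain attribute is implicitly writable through the protocol, so an
    # implementation with a *narrower* attribute type is rejected: callers
    # could assign a broader value and break the implementation.
    Error: Type[Exception]


class ReadOnlyModule(Protocol):
    # A property is read-only through the protocol, so covariance is allowed.
    @property
    def Error(self) -> Type[Exception]:
        ...


class SqliteError(Exception):
    ...


class FakeDriver:
    # Narrower than Type[Exception]: fine for ReadOnlyModule, but an error
    # for MutableModule under mypy's protocol rules.
    Error = SqliteError


def uses_module(mod: ReadOnlyModule) -> None:
    print(mod.Error)


uses_module(FakeDriver())
```

The same reasoning applies to the `connect` member, which becomes a read-only property returning a `Callable` in the patch.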
diff --git a/poetry.lock b/poetry.lock index e534b30d2b21..eb1e3d797bd6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -146,14 +146,14 @@ css = ["tinycss2 (>=1.1.0,<1.2)"] [[package]] name = "canonicaljson" -version = "1.6.4" +version = "1.6.5" description = "Canonical JSON" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "canonicaljson-1.6.4-py3-none-any.whl", hash = "sha256:55d282853b4245dbcd953fe54c39b91571813d7c44e1dbf66e3c4f97ff134a48"}, - {file = "canonicaljson-1.6.4.tar.gz", hash = "sha256:6c09b2119511f30eb1126cfcd973a10824e20f1cfd25039cde3d1218dd9c8d8f"}, + {file = "canonicaljson-1.6.5-py3-none-any.whl", hash = "sha256:806ea6f2cbb7405d20259e1c36dd1214ba5c242fa9165f5bd0bf2081f82c23fb"}, + {file = "canonicaljson-1.6.5.tar.gz", hash = "sha256:68dfc157b011e07d94bf74b5d4ccc01958584ed942d9dfd5fdd706609e81cd4b"}, ] [package.dependencies] @@ -1146,36 +1146,38 @@ files = [ [[package]] name = "mypy" -version = "0.981" +version = "1.0.0" description = "Optional static typing for Python" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "mypy-0.981-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4bc460e43b7785f78862dab78674e62ec3cd523485baecfdf81a555ed29ecfa0"}, - {file = "mypy-0.981-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:756fad8b263b3ba39e4e204ee53042671b660c36c9017412b43af210ddee7b08"}, - {file = "mypy-0.981-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a16a0145d6d7d00fbede2da3a3096dcc9ecea091adfa8da48fa6a7b75d35562d"}, - {file = "mypy-0.981-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce65f70b14a21fdac84c294cde75e6dbdabbcff22975335e20827b3b94bdbf49"}, - {file = "mypy-0.981-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e35d764784b42c3e256848fb8ed1d4292c9fc0098413adb28d84974c095b279"}, - {file = "mypy-0.981-cp310-cp310-win_amd64.whl", hash = "sha256:e53773073c864d5f5cec7f3fc72fbbcef65410cde8cc18d4f7242dea60dac52e"}, - {file = "mypy-0.981-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6ee196b1d10b8b215e835f438e06965d7a480f6fe016eddbc285f13955cca659"}, - {file = "mypy-0.981-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ad21d4c9d3673726cf986ea1d0c9fb66905258709550ddf7944c8f885f208be"}, - {file = "mypy-0.981-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d1debb09043e1f5ee845fa1e96d180e89115b30e47c5d3ce53bc967bab53f62d"}, - {file = "mypy-0.981-cp37-cp37m-win_amd64.whl", hash = "sha256:9f362470a3480165c4c6151786b5379351b790d56952005be18bdbdd4c7ce0ae"}, - {file = "mypy-0.981-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:c9e0efb95ed6ca1654951bd5ec2f3fa91b295d78bf6527e026529d4aaa1e0c30"}, - {file = "mypy-0.981-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e178eaffc3c5cd211a87965c8c0df6da91ed7d258b5fc72b8e047c3771317ddb"}, - {file = "mypy-0.981-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:06e1eac8d99bd404ed8dd34ca29673c4346e76dd8e612ea507763dccd7e13c7a"}, - {file = "mypy-0.981-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa38f82f53e1e7beb45557ff167c177802ba7b387ad017eab1663d567017c8ee"}, - {file = "mypy-0.981-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:64e1f6af81c003f85f0dfed52db632817dabb51b65c0318ffbf5ff51995bbb08"}, - {file = "mypy-0.981-cp38-cp38-win_amd64.whl", hash = "sha256:e1acf62a8c4f7c092462c738aa2c2489e275ed386320c10b2e9bff31f6f7e8d6"}, - {file = "mypy-0.981-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:b6ede64e52257931315826fdbfc6ea878d89a965580d1a65638ef77cb551f56d"}, - {file = "mypy-0.981-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:eb3978b191b9fa0488524bb4ffedf2c573340e8c2b4206fc191d44c7093abfb7"}, - {file = "mypy-0.981-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:77f8fcf7b4b3cc0c74fb33ae54a4cd00bb854d65645c48beccf65fa10b17882c"}, - {file = "mypy-0.981-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64d2ce043a209a297df322eb4054dfbaa9de9e8738291706eaafda81ab2b362"}, - {file = "mypy-0.981-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ee3dbc53d4df7e6e3b1c68ac6a971d3a4fb2852bf10a05fda228721dd44fae1"}, - {file = "mypy-0.981-cp39-cp39-win_amd64.whl", hash = "sha256:8e8e49aa9cc23aa4c926dc200ce32959d3501c4905147a66ce032f05cb5ecb92"}, - {file = "mypy-0.981-py3-none-any.whl", hash = "sha256:794f385653e2b749387a42afb1e14c2135e18daeb027e0d97162e4b7031210f8"}, - {file = "mypy-0.981.tar.gz", hash = "sha256:ad77c13037d3402fbeffda07d51e3f228ba078d1c7096a73759c9419ea031bf4"}, + {file = "mypy-1.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e0626db16705ab9f7fa6c249c017c887baf20738ce7f9129da162bb3075fc1af"}, + {file = "mypy-1.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1ace23f6bb4aec4604b86c4843276e8fa548d667dbbd0cb83a3ae14b18b2db6c"}, + {file = "mypy-1.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87edfaf344c9401942883fad030909116aa77b0fa7e6e8e1c5407e14549afe9a"}, + {file = "mypy-1.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0ab090d9240d6b4e99e1fa998c2d0aa5b29fc0fb06bd30e7ad6183c95fa07593"}, + {file = "mypy-1.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:7cc2c01dfc5a3cbddfa6c13f530ef3b95292f926329929001d45e124342cd6b7"}, + {file = "mypy-1.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:14d776869a3e6c89c17eb943100f7868f677703c8a4e00b3803918f86aafbc52"}, + {file = "mypy-1.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bb2782a036d9eb6b5a6efcdda0986774bf798beef86a62da86cb73e2a10b423d"}, + {file = "mypy-1.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cfca124f0ac6707747544c127880893ad72a656e136adc935c8600740b21ff5"}, + {file = "mypy-1.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:8845125d0b7c57838a10fd8925b0f5f709d0e08568ce587cc862aacce453e3dd"}, + {file = "mypy-1.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b1b9e1ed40544ef486fa8ac022232ccc57109f379611633ede8e71630d07d2"}, + {file = "mypy-1.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c7cf862aef988b5fbaa17764ad1d21b4831436701c7d2b653156a9497d92c83c"}, + {file = "mypy-1.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd187d92b6939617f1168a4fe68f68add749902c010e66fe574c165c742ed88"}, + {file = "mypy-1.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4e5175026618c178dfba6188228b845b64131034ab3ba52acaffa8f6c361f805"}, + {file = "mypy-1.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:2f6ac8c87e046dc18c7d1d7f6653a66787a4555085b056fe2d599f1f1a2a2d21"}, + {file = "mypy-1.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7306edca1c6f1b5fa0bc9aa645e6ac8393014fa82d0fa180d0ebc990ebe15964"}, + {file = "mypy-1.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3cfad08f16a9c6611e6143485a93de0e1e13f48cfb90bcad7d5fde1c0cec3d36"}, + {file = "mypy-1.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67cced7f15654710386e5c10b96608f1ee3d5c94ca1da5a2aad5889793a824c1"}, + {file = 
"mypy-1.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a86b794e8a56ada65c573183756eac8ac5b8d3d59daf9d5ebd72ecdbb7867a43"}, + {file = "mypy-1.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:50979d5efff8d4135d9db293c6cb2c42260e70fb010cbc697b1311a4d7a39ddb"}, + {file = "mypy-1.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3ae4c7a99e5153496243146a3baf33b9beff714464ca386b5f62daad601d87af"}, + {file = "mypy-1.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e398652d005a198a7f3c132426b33c6b85d98aa7dc852137a2a3be8890c4072"}, + {file = "mypy-1.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be78077064d016bc1b639c2cbcc5be945b47b4261a4f4b7d8923f6c69c5c9457"}, + {file = "mypy-1.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:92024447a339400ea00ac228369cd242e988dd775640755fa4ac0c126e49bb74"}, + {file = "mypy-1.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:fe523fcbd52c05040c7bee370d66fee8373c5972171e4fbc323153433198592d"}, + {file = "mypy-1.0.0-py3-none-any.whl", hash = "sha256:2efa963bdddb27cb4a0d42545cd137a8d2b883bd181bbc4525b568ef6eca258f"}, + {file = "mypy-1.0.0.tar.gz", hash = "sha256:f34495079c8d9da05b183f9f7daec2878280c2ad7cc81da686ef0b484cea2ecf"}, ] [package.dependencies] @@ -1186,6 +1188,7 @@ typing-extensions = ">=3.10" [package.extras] dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] python2 = ["typed-ast (>=1.4.0,<2)"] reports = ["lxml"] @@ -1203,18 +1206,18 @@ files = [ [[package]] name = "mypy-zope" -version = "0.3.11" +version = "0.9.0" description = "Plugin for mypy to support zope interfaces" category = "dev" optional = false python-versions = "*" files = [ - {file = "mypy-zope-0.3.11.tar.gz", hash = "sha256:d4255f9f04d48c79083bbd4e2fea06513a6ac7b8de06f8c4ce563fd85142ca05"}, - {file = "mypy_zope-0.3.11-py3-none-any.whl", hash = "sha256:ec080a6508d1f7805c8d2054f9fdd13c849742ce96803519e1fdfa3d3cab7140"}, + {file = "mypy-zope-0.9.0.tar.gz", hash = "sha256:88bf6cd056e38b338e6956055958a7805b4ff84404ccd99e29883a3647a1aeb3"}, + {file = "mypy_zope-0.9.0-py3-none-any.whl", hash = "sha256:e1bb4b57084f76ff8a154a3e07880a1af2ac6536c491dad4b143d529f72c5d15"}, ] [package.dependencies] -mypy = "0.981" +mypy = "1.0.0" "zope.interface" = "*" "zope.schema" = "*" @@ -1705,7 +1708,7 @@ files = [ cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 57a6854b1e08..cf12b55d21e9 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -201,7 +201,7 @@ def __init__(self, hs: "HomeServer"): for auth_checker_class in INTERACTIVE_AUTH_CHECKERS: inst = auth_checker_class(hs) if inst.is_enabled(): - self.checkers[inst.AUTH_TYPE] = inst # type: ignore + self.checkers[inst.AUTH_TYPE] = inst self.bcrypt_rounds = hs.config.registration.bcrypt_rounds diff --git a/synapse/handlers/ui_auth/checkers.py b/synapse/handlers/ui_auth/checkers.py index 332edcca24e5..78a75bfed6ee 100644 --- a/synapse/handlers/ui_auth/checkers.py +++ b/synapse/handlers/ui_auth/checkers.py @@ -13,7 +13,8 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Any +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, Any, ClassVar, Sequence, Type from twisted.web.client import PartialDownloadError @@ -27,19 +28,28 @@ logger = logging.getLogger(__name__) -class UserInteractiveAuthChecker: +class UserInteractiveAuthChecker(ABC): """Abstract base class for an interactive auth checker""" - def __init__(self, hs: "HomeServer"): + # This should really be an "abstract class property", i.e. it should + # be an error to instantiate a subclass that doesn't specify an AUTH_TYPE. + # But calling this a `ClassVar` is simpler than a decorator stack of + # @property @abstractmethod and @classmethod (if that's even the right order). + AUTH_TYPE: ClassVar[str] + + def __init__(self, hs: "HomeServer"): # noqa: B027 pass + @abstractmethod def is_enabled(self) -> bool: """Check if the configuration of the homeserver allows this checker to work Returns: True if this login type is enabled. """ + raise NotImplementedError() + @abstractmethod async def check_auth(self, authdict: dict, clientip: str) -> Any: """Given the authentication dict from the client, attempt to check this step @@ -304,7 +314,7 @@ async def check_auth(self, authdict: dict, clientip: str) -> Any: ) -INTERACTIVE_AUTH_CHECKERS = [ +INTERACTIVE_AUTH_CHECKERS: Sequence[Type[UserInteractiveAuthChecker]] = [ DummyAuthChecker, TermsAuthChecker, RecaptchaAuthChecker, diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index b92f1d3d1af5..312aab4dcc7b 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -1267,7 +1267,7 @@ async def get_file( def _flatten_response_never_received(e: BaseException) -> str: if hasattr(e, "reasons"): reasons = ", ".join( - _flatten_response_never_received(f.value) for f in e.reasons # type: ignore[attr-defined] + _flatten_response_never_received(f.value) for f in e.reasons ) return "%s:[%s]" % (type(e).__name__, reasons) diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 6c7cf1b29487..5aed71262fbd 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -188,7 +188,7 @@ def set_fates(clotho, lachesis, atropos, father="Zues", mother="Themis"): ) import attr -from typing_extensions import ParamSpec +from typing_extensions import Concatenate, ParamSpec from twisted.internet import defer from twisted.web.http import Request @@ -445,7 +445,7 @@ def init_tracer(hs: "HomeServer") -> None: opentracing = None # type: ignore[assignment] return - if not opentracing or not JaegerConfig: + if opentracing is None or JaegerConfig is None: raise ConfigError( "The server has been configured to use opentracing but opentracing is not " "installed." 
@@ -872,7 +872,7 @@ def extract_text_map(carrier: Dict[str, str]) -> Optional["opentracing.SpanConte def _custom_sync_async_decorator( func: Callable[P, R], - wrapping_logic: Callable[[Callable[P, R], Any, Any], ContextManager[None]], + wrapping_logic: Callable[Concatenate[Callable[P, R], P], ContextManager[None]], ) -> Callable[P, R]: """ Decorates a function that is sync or async (coroutines), or that returns a Twisted @@ -902,10 +902,14 @@ def _wrapping_logic(func: Callable[P, R], *args: P.args, **kwargs: P.kwargs) -> """ if inspect.iscoroutinefunction(func): - + # In this branch, R = Awaitable[RInner], for some other type RInner @wraps(func) - async def _wrapper(*args: P.args, **kwargs: P.kwargs) -> R: + async def _wrapper( + *args: P.args, **kwargs: P.kwargs + ) -> Any: # Return type is RInner with wrapping_logic(func, *args, **kwargs): + # type-ignore: func() returns R, but mypy doesn't know that R is + # Awaitable here. return await func(*args, **kwargs) # type: ignore[misc] else: @@ -972,7 +976,11 @@ def _decorator(func: Callable[P, R]) -> Callable[P, R]: if not opentracing: return func - return _custom_sync_async_decorator(func, _wrapping_logic) + # type-ignore: mypy seems to be confused by the ParamSpecs here. + # I think the problem is https://github.com/python/mypy/issues/12909 + return _custom_sync_async_decorator( + func, _wrapping_logic # type: ignore[arg-type] + ) return _decorator @@ -1018,7 +1026,9 @@ def _wrapping_logic( set_tag(SynapseTags.FUNC_KWARGS, str(kwargs)) yield - return _custom_sync_async_decorator(func, _wrapping_logic) + # type-ignore: mypy seems to be confused by the ParamSpecs here. + # I think the problem is https://github.com/python/mypy/issues/12909 + return _custom_sync_async_decorator(func, _wrapping_logic) # type: ignore[arg-type] @contextlib.contextmanager diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index d30878f70496..6e035afcce59 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -16,6 +16,7 @@ import logging import os import urllib +from abc import ABC, abstractmethod from types import TracebackType from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type @@ -284,13 +285,14 @@ async def respond_with_responder( finish_request(request) -class Responder: +class Responder(ABC): """Represents a response that can be streamed to the requester. Responder is a context manager which *must* be used, so that any resources held can be cleaned up. 
""" + @abstractmethod def write_to_consumer(self, consumer: IConsumer) -> Awaitable: """Stream response into consumer @@ -300,11 +302,12 @@ def write_to_consumer(self, consumer: IConsumer) -> Awaitable: Returns: Resolves once the response has finished being written """ + raise NotImplementedError() - def __enter__(self) -> None: + def __enter__(self) -> None: # noqa: B027 pass - def __exit__( + def __exit__( # noqa: B027 self, exc_type: Optional[Type[BaseException]], exc_val: Optional[BaseException], diff --git a/synapse/storage/engines/__init__.py b/synapse/storage/engines/__init__.py index a182e8a098b1..d1ccb7390a02 100644 --- a/synapse/storage/engines/__init__.py +++ b/synapse/storage/engines/__init__.py @@ -25,7 +25,7 @@ except ImportError: class PostgresEngine(BaseDatabaseEngine): # type: ignore[no-redef] - def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: raise RuntimeError( f"Cannot create {cls.__name__} -- psycopg2 module is not installed" ) @@ -36,7 +36,7 @@ def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[m except ImportError: class Sqlite3Engine(BaseDatabaseEngine): # type: ignore[no-redef] - def __new__(cls, *args: object, **kwargs: object) -> NoReturn: # type: ignore[misc] + def __new__(cls, *args: object, **kwargs: object) -> NoReturn: raise RuntimeError( f"Cannot create {cls.__name__} -- sqlite3 module is not installed" ) diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 0031df1e0649..56a00485393d 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -12,7 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. from types import TracebackType -from typing import Any, Iterator, List, Mapping, Optional, Sequence, Tuple, Type, Union +from typing import ( + Any, + Callable, + Iterator, + List, + Mapping, + Optional, + Sequence, + Tuple, + Type, + Union, +) from typing_extensions import Protocol @@ -112,15 +123,35 @@ class DBAPI2Module(Protocol): # extends from this hierarchy. See # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#exceptions # https://www.postgresql.org/docs/current/errcodes-appendix.html#ERRCODES-TABLE - Warning: Type[Exception] - Error: Type[Exception] + # + # Note: rather than + # x: T + # we write + # @property + # def x(self) -> T: ... + # which expresses that the protocol attribute `x` is read-only. The mypy docs + # https://mypy.readthedocs.io/en/latest/common_issues.html#covariant-subtyping-of-mutable-protocol-members-is-rejected + # explain why this is necessary for safety. TL;DR: we shouldn't be able to write + # to `x`, only read from it. See also https://github.com/python/mypy/issues/6002 . + @property + def Warning(self) -> Type[Exception]: + ... + + @property + def Error(self) -> Type[Exception]: + ... # Errors are divided into `InterfaceError`s (something went wrong in the database # driver) and `DatabaseError`s (something went wrong in the database). These are # both subclasses of `Error`, but we can't currently express this in type # annotations due to https://github.com/python/mypy/issues/8397 - InterfaceError: Type[Exception] - DatabaseError: Type[Exception] + @property + def InterfaceError(self) -> Type[Exception]: + ... + + @property + def DatabaseError(self) -> Type[Exception]: + ... # Everything below is a subclass of `DatabaseError`. 
@@ -128,7 +159,9 @@ class DBAPI2Module(Protocol): # - An integer was too big for its data type. # - An invalid date time was provided. # - A string contained a null code point. - DataError: Type[Exception] + @property + def DataError(self) -> Type[Exception]: + ... # Roughly: something went wrong in the database, but it's not within the application # programmer's control. Examples: @@ -138,28 +171,45 @@ class DBAPI2Module(Protocol): # - A serialisation failure occurred. # - The database ran out of resources, such as storage, memory, connections, etc. # - The database encountered an error from the operating system. - OperationalError: Type[Exception] + @property + def OperationalError(self) -> Type[Exception]: + ... # Roughly: we've given the database data which breaks a rule we asked it to enforce. # Examples: # - Stop, criminal scum! You violated the foreign key constraint # - Also check constraints, non-null constraints, etc. - IntegrityError: Type[Exception] + @property + def IntegrityError(self) -> Type[Exception]: + ... # Roughly: something went wrong within the database server itself. - InternalError: Type[Exception] + @property + def InternalError(self) -> Type[Exception]: + ... # Roughly: the application did something silly that needs to be fixed. Examples: # - We don't have permissions to do something. # - We tried to create a table with duplicate column names. # - We tried to use a reserved name. # - We referred to a column that doesn't exist. - ProgrammingError: Type[Exception] + @property + def ProgrammingError(self) -> Type[Exception]: + ... # Roughly: we've tried to do something that this database doesn't support. - NotSupportedError: Type[Exception] + @property + def NotSupportedError(self) -> Type[Exception]: + ... - def connect(self, **parameters: object) -> Connection: + # We originally wrote + # def connect(self, *args, **kwargs) -> Connection: ... + # But mypy doesn't seem to like that because sqlite3.connect takes a mandatory + # positional argument. We can't make that part of the signature though, because + # psycopg2.connect doesn't have a mandatory positional argument. Instead, we use + # the following slightly unusual workaround. + @property + def connect(self) -> Callable[..., Connection]: ... diff --git a/synapse/streams/__init__.py b/synapse/streams/__init__.py index c6c8a0315c9b..8a48ffc48ddf 100644 --- a/synapse/streams/__init__.py +++ b/synapse/streams/__init__.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +from abc import ABC, abstractmethod from typing import Generic, List, Optional, Tuple, TypeVar from synapse.types import StrCollection, UserID @@ -22,7 +22,8 @@ R = TypeVar("R") -class EventSource(Generic[K, R]): +class EventSource(ABC, Generic[K, R]): + @abstractmethod async def get_new_events( self, user: UserID, @@ -32,4 +33,4 @@ async def get_new_events( is_guest: bool, explicit_room_id: Optional[str] = None, ) -> Tuple[List[R], K]: - ... 
+ raise NotImplementedError() diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 782ef09cf4ab..1db99b3c0028 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -62,7 +62,7 @@ async def check_registration_for_spam( request_info: Collection[Tuple[str, str]], auth_provider_id: Optional[str], ) -> RegistrationBehaviour: - pass + return RegistrationBehaviour.ALLOW class DenyAll(TestSpamChecker): @@ -111,7 +111,7 @@ async def check_registration_for_spam( username: Optional[str], request_info: Collection[Tuple[str, str]], ) -> RegistrationBehaviour: - pass + return RegistrationBehaviour.ALLOW class LegacyAllowAll(TestLegacyRegistrationSpamChecker): diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index acfdcd3bca25..d27422515c8f 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -63,7 +63,7 @@ get_test_ca_cert_file, ) from tests.server import FakeTransport, ThreadedMemoryReactorClock -from tests.utils import default_config +from tests.utils import checked_cast, default_config logger = logging.getLogger(__name__) @@ -146,8 +146,10 @@ def _make_connection( # # Normally this would be done by the TCP socket code in Twisted, but we are # stubbing that out here. - client_protocol = client_factory.buildProtocol(dummy_address) - assert isinstance(client_protocol, _WrappingProtocol) + # NB: we use a checked_cast here to workaround https://github.com/Shoobx/mypy-zope/issues/91) + client_protocol = checked_cast( + _WrappingProtocol, client_factory.buildProtocol(dummy_address) + ) client_protocol.makeConnection( FakeTransport(server_protocol, self.reactor, client_protocol) ) @@ -446,7 +448,6 @@ def _do_get_via_proxy( server_ssl_protocol = _wrap_server_factory_for_tls( _get_test_protocol_factory() ).buildProtocol(dummy_address) - assert isinstance(server_ssl_protocol, TLSMemoryBIOProtocol) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. proxy_server_transport = proxy_server.transport @@ -1529,7 +1530,7 @@ def _check_logcontext(context: LoggingContextOrSentinel) -> None: def _wrap_server_factory_for_tls( factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> IProtocolFactory: +) -> TLSMemoryBIOFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a certificate signed by our test CA, valid for the domains in `sanlist` diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index a817940730fd..22fdc7f5f23f 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -43,6 +43,7 @@ ) from tests.server import FakeTransport, ThreadedMemoryReactorClock from tests.unittest import TestCase +from tests.utils import checked_cast logger = logging.getLogger(__name__) @@ -620,7 +621,6 @@ def _do_https_request_via_proxy( server_ssl_protocol = _wrap_server_factory_for_tls( _get_test_protocol_factory() ).buildProtocol(dummy_address) - assert isinstance(server_ssl_protocol, TLSMemoryBIOProtocol) # Tell the HTTP server to send outgoing traffic back via the proxy's transport. 
proxy_server_transport = proxy_server.transport @@ -757,12 +757,14 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None: assert isinstance(proxy_server, HTTPChannel) # fish the transports back out so that we can do the old switcheroo - s2c_transport = proxy_server.transport - assert isinstance(s2c_transport, FakeTransport) - client_protocol = s2c_transport.other - assert isinstance(client_protocol, _WrappingProtocol) - c2s_transport = client_protocol.transport - assert isinstance(c2s_transport, FakeTransport) + # To help mypy out with the various Protocols and wrappers and mocks, we do + # some explicit casting. Without the casts, we hit the bug I reported at + # https://github.com/Shoobx/mypy-zope/issues/91 . + # We also double-checked these casts at runtime (test-time) because I found it + # quite confusing to deduce these types in the first place! + s2c_transport = checked_cast(FakeTransport, proxy_server.transport) + client_protocol = checked_cast(_WrappingProtocol, s2c_transport.other) + c2s_transport = checked_cast(FakeTransport, client_protocol.transport) # the FakeTransport is async, so we need to pump the reactor self.reactor.advance(0) @@ -822,9 +824,9 @@ def test_https_request_via_uppercase_proxy_with_blacklist(self) -> None: @patch.dict(os.environ, {"http_proxy": "proxy.com:8888"}) def test_proxy_with_no_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - assert isinstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) - self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") - self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + proxy_ep = checked_cast(HostnameEndpoint, http_proxy_agent.http_proxy_endpoint) + self.assertEqual(proxy_ep._hostStr, "proxy.com") + self.assertEqual(proxy_ep._port, 8888) @patch.dict(os.environ, {"http_proxy": "socks://proxy.com:8888"}) def test_proxy_with_unsupported_scheme(self) -> None: @@ -834,25 +836,21 @@ def test_proxy_with_unsupported_scheme(self) -> None: @patch.dict(os.environ, {"http_proxy": "http://proxy.com:8888"}) def test_proxy_with_http_scheme(self) -> None: http_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - assert isinstance(http_proxy_agent.http_proxy_endpoint, HostnameEndpoint) - self.assertEqual(http_proxy_agent.http_proxy_endpoint._hostStr, "proxy.com") - self.assertEqual(http_proxy_agent.http_proxy_endpoint._port, 8888) + proxy_ep = checked_cast(HostnameEndpoint, http_proxy_agent.http_proxy_endpoint) + self.assertEqual(proxy_ep._hostStr, "proxy.com") + self.assertEqual(proxy_ep._port, 8888) @patch.dict(os.environ, {"http_proxy": "https://proxy.com:8888"}) def test_proxy_with_https_scheme(self) -> None: https_proxy_agent = ProxyAgent(self.reactor, use_proxy=True) - assert isinstance(https_proxy_agent.http_proxy_endpoint, _WrapperEndpoint) - self.assertEqual( - https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._hostStr, "proxy.com" - ) - self.assertEqual( - https_proxy_agent.http_proxy_endpoint._wrappedEndpoint._port, 8888 - ) + proxy_ep = checked_cast(_WrapperEndpoint, https_proxy_agent.http_proxy_endpoint) + self.assertEqual(proxy_ep._wrappedEndpoint._hostStr, "proxy.com") + self.assertEqual(proxy_ep._wrappedEndpoint._port, 8888) def _wrap_server_factory_for_tls( factory: IProtocolFactory, sanlist: Optional[List[bytes]] = None -) -> IProtocolFactory: +) -> TLSMemoryBIOFactory: """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory The resultant factory will create a TLS server which presents a 
certificate diff --git a/tests/logging/test_remote_handler.py b/tests/logging/test_remote_handler.py index c08954d887cb..5191e31a8ae8 100644 --- a/tests/logging/test_remote_handler.py +++ b/tests/logging/test_remote_handler.py @@ -21,6 +21,7 @@ from tests.logging import LoggerCleanupMixin from tests.server import FakeTransport, get_clock from tests.unittest import TestCase +from tests.utils import checked_cast def connect_logging_client( @@ -56,8 +57,8 @@ def test_log_output(self) -> None: client, server = connect_logging_client(self.reactor, 0) # Trigger data being sent - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # One log message, with a single trailing newline logs = server.data.decode("utf8").splitlines() @@ -89,8 +90,8 @@ def test_log_backpressure_debug(self) -> None: # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # Only the 7 infos made it through, the debugs were elided logs = server.data.splitlines() @@ -123,8 +124,8 @@ def test_log_backpressure_info(self) -> None: # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # The 10 warnings made it through, the debugs and infos were elided logs = server.data.splitlines() @@ -148,8 +149,8 @@ def test_log_backpressure_cut_middle(self) -> None: # Allow the reconnection client, server = connect_logging_client(self.reactor, 0) - assert isinstance(client.transport, FakeTransport) - client.transport.flush() + client_transport = checked_cast(FakeTransport, client.transport) + client_transport.flush() # The first five and last five warnings made it through, the debugs and # infos were elided diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index 208ec4482970..f4e1e7de4352 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -43,6 +43,9 @@ def __init__(self, hs: HomeServer) -> None: super().__init__(hs) self.recaptcha_attempts: List[Tuple[dict, str]] = [] + def is_enabled(self) -> bool: + return True + def check_auth(self, authdict: dict, clientip: str) -> Any: self.recaptcha_attempts.append((authdict, clientip)) return succeed(True) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 3325d43a2fa4..5fa3440691e8 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -425,7 +425,7 @@ def test_sent_event_end_up_in_room_state(self) -> None: async def test_fn( event: EventBase, state_events: StateMap[EventBase] ) -> Tuple[bool, Optional[JsonDict]]: - if event.is_state and event.type == EventTypes.PowerLevels: + if event.is_state() and event.type == EventTypes.PowerLevels: await api.create_and_send_event_into_room( { "room_id": event.room_id, diff --git a/tests/utils.py b/tests/utils.py index 15fabbc2d01e..a0ac11bc5cd2 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -15,7 +15,7 @@ import atexit import os -from typing import Any, Callable, Dict, List, Tuple, Union, overload +from typing import Any, Callable, Dict, List, Tuple, Type, TypeVar, Union, 
overload import attr from typing_extensions import Literal, ParamSpec @@ -341,3 +341,27 @@ async def create_room(hs: HomeServer, room_id: str, creator_id: str) -> None: context = await unpersisted_context.persist(event) await persistence_store.persist_event(event, context) + + +T = TypeVar("T") + + +def checked_cast(type: Type[T], x: object) -> T: + """A version of typing.cast that is checked at runtime. + + We have our own function for this for two reasons: + + 1. typing.cast itself is deliberately a no-op at runtime, see + https://docs.python.org/3/library/typing.html#typing.cast + 2. To help workaround a mypy-zope bug https://github.com/Shoobx/mypy-zope/issues/91 + where mypy would erroneously consider `isinstance(x, type)` to be false in all + circumstances. + + For this to make sense, `T` needs to be something that `isinstance` can check; see + https://docs.python.org/3/library/functions.html?highlight=isinstance#isinstance + https://docs.python.org/3/glossary.html#term-abstract-base-class + https://docs.python.org/3/library/typing.html#typing.runtime_checkable + for more details. + """ + assert isinstance(x, type) + return x From ad1f3fa8e10d99ca49acd7a351a8e695e1412d64 Mon Sep 17 00:00:00 2001 From: ZAID BIN TARIQ <57444558+thezaidbintariq@users.noreply.github.com> Date: Thu, 16 Feb 2023 23:20:02 +0500 Subject: [PATCH 203/344] Document how to start Synapse with Poetry (#14892) * Add Start Synapse with Poetry * Create 14892.doc * Apply suggestions from code review Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Update docs/workers.md --------- Co-authored-by: David Robertson Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/14892.doc | 1 + docs/workers.md | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 changelog.d/14892.doc diff --git a/changelog.d/14892.doc b/changelog.d/14892.doc new file mode 100644 index 000000000000..2bc3ad06c6b6 --- /dev/null +++ b/changelog.d/14892.doc @@ -0,0 +1 @@ +Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. diff --git a/docs/workers.md b/docs/workers.md index bc66f0e1bce5..2eb970ffa6a0 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -160,7 +160,18 @@ recommend the use of `systemd` where available: for information on setting up [Systemd with Workers](systemd-with-workers/). To use `synctl`, see [Using synctl with Workers](synctl_workers.md). +## Start Synapse with Poetry +The following applies to Synapse installations that have been installed from source using `poetry`. + +You can start the main Synapse process with Poetry by running the following command: +```console +poetry run synapse_homeserver -c [your homeserver.yaml] +``` +For worker setups, you can run the following command +```console +poetry run synapse_worker -c [your worker.yaml] +``` ## Available worker applications ### `synapse.app.generic_worker` From 4f4f27e57fdab1d7cc6e275b8acabc785952205e Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 17 Feb 2023 09:40:32 +0000 Subject: [PATCH 204/344] Mitigate a race where /make_join could 403 for restricted rooms (#15080) Previously, when creating a join event in /make_join, we would decide whether to include additional fields to satisfy restricted room checks based on the current state of the room. Then, when building the event, we would capture the forward extremities of the room to use as prev events. This is subject to race conditions. 
For example, when leaving and rejoining a room, the following sequence of events leads to a misleading 403 response: 1. /make_join reads the current state of the room and sees that the user is still in the room. It decides to omit the field required for restricted room joins. 2. The leave event is persisted and the room's forward extremities are updated. 3. /make_join builds the event, using the post-leave forward extremities. The event then fails the restricted room checks. To mitigate the race, we move the read of the forward extremities closer to the read of the current state. Ideally, we would compute the state based off the chosen prev events, but that can involve state resolution, which is expensive. Signed-off-by: Sean Quah --- changelog.d/15080.bugfix | 1 + synapse/handlers/federation.py | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15080.bugfix diff --git a/changelog.d/15080.bugfix b/changelog.d/15080.bugfix new file mode 100644 index 000000000000..965d0b921e0a --- /dev/null +++ b/changelog.d/15080.bugfix @@ -0,0 +1 @@ +Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1d0f6bcd6f8b..5f2057269dac 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -952,7 +952,20 @@ async def on_make_join_request( # # Note that this requires the /send_join request to come back to the # same server. + prev_event_ids = None if room_version.msc3083_join_rules: + # Note that the room's state can change out from under us and render our + # nice join rules-conformant event non-conformant by the time we build the + # event. When this happens, our validation at the end fails and we respond + # to the requesting server with a 403, which is misleading — it indicates + # that the user is not allowed to join the room and the joining server + # should not bother retrying via this homeserver or any others, when + # in fact we've just messed up with building the event. + # + # To reduce the likelihood of this race, we capture the forward extremities + # of the room (prev_event_ids) just before fetching the current state, and + # hope that the state we fetch corresponds to the prev events we chose. 
+ prev_event_ids = await self.store.get_prev_events_for_room(room_id) state_ids = await self._state_storage_controller.get_current_state_ids( room_id ) @@ -994,7 +1007,8 @@ async def on_make_join_request( event, unpersisted_context, ) = await self.event_creation_handler.create_new_client_event( - builder=builder + builder=builder, + prev_event_ids=prev_event_ids, ) except SynapseError as e: logger.warning("Failed to create join to %s because %s", room_id, e) From 61bfcd669ae596a8df940f434e3e2335059100b1 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Fri, 17 Feb 2023 14:54:55 +0100 Subject: [PATCH 205/344] Add account data to export command (#14969) * Add account data to to export command * newsfile * remove not needed function * update newsfile * adopt #14973 --- changelog.d/14969.feature | 1 + docs/usage/administration/admin_faq.md | 3 ++ synapse/app/admin_cmd.py | 15 +++++++- synapse/handlers/admin.py | 49 ++++++++++++++++++-------- tests/handlers/test_admin.py | 27 ++++++++++++++ 5 files changed, 79 insertions(+), 16 deletions(-) create mode 100644 changelog.d/14969.feature diff --git a/changelog.d/14969.feature b/changelog.d/14969.feature new file mode 100644 index 000000000000..a4680ef9c89b --- /dev/null +++ b/changelog.d/14969.feature @@ -0,0 +1 @@ +Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). \ No newline at end of file diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index 7a2774119964..925e1d175e64 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -71,6 +71,9 @@ output-directory │ ├───invite_state │ └───knock_state └───user_data + ├───account_data + │ ├───global + │ └─── ├───connections ├───devices └───profile diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index fe7afb94755e..ad51f33165e3 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -17,7 +17,7 @@ import os import sys import tempfile -from typing import List, Optional +from typing import List, Mapping, Optional from twisted.internet import defer, task @@ -222,6 +222,19 @@ def write_connections(self, connections: List[JsonDict]) -> None: with open(connection_file, "a") as f: print(json.dumps(connection), file=f) + def write_account_data( + self, file_name: str, account_data: Mapping[str, JsonDict] + ) -> None: + account_data_directory = os.path.join( + self.base_directory, "user_data", "account_data" + ) + os.makedirs(account_data_directory, exist_ok=True) + + account_data_file = os.path.join(account_data_directory, file_name) + + with open(account_data_file, "a") as f: + print(json.dumps(account_data), file=f) + def finished(self) -> str: return self.base_directory diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index b03c214b145a..8b7760b2cc07 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -14,7 +14,7 @@ import abc import logging -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set +from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Set from synapse.api.constants import Direction, Membership from synapse.events import EventBase @@ -29,7 +29,7 @@ class AdminHandler: def __init__(self, hs: "HomeServer"): - self.store = hs.get_datastores().main + self._store = hs.get_datastores().main self._device_handler = hs.get_device_handler() 
self._storage_controllers = hs.get_storage_controllers() self._state_storage_controller = self._storage_controllers.state @@ -38,7 +38,7 @@ def __init__(self, hs: "HomeServer"): async def get_whois(self, user: UserID) -> JsonDict: connections = [] - sessions = await self.store.get_user_ip_and_agents(user) + sessions = await self._store.get_user_ip_and_agents(user) for session in sessions: connections.append( { @@ -57,7 +57,7 @@ async def get_whois(self, user: UserID) -> JsonDict: async def get_user(self, user: UserID) -> Optional[JsonDict]: """Function to get user details""" - user_info_dict = await self.store.get_user_by_id(user.to_string()) + user_info_dict = await self._store.get_user_by_id(user.to_string()) if user_info_dict is None: return None @@ -89,11 +89,11 @@ async def get_user(self, user: UserID) -> Optional[JsonDict]: } # Add additional user metadata - profile = await self.store.get_profileinfo(user.localpart) - threepids = await self.store.user_get_threepids(user.to_string()) + profile = await self._store.get_profileinfo(user.localpart) + threepids = await self._store.user_get_threepids(user.to_string()) external_ids = [ ({"auth_provider": auth_provider, "external_id": external_id}) - for auth_provider, external_id in await self.store.get_external_ids_by_user( + for auth_provider, external_id in await self._store.get_external_ids_by_user( user.to_string() ) ] @@ -101,7 +101,7 @@ async def get_user(self, user: UserID) -> Optional[JsonDict]: user_info_dict["avatar_url"] = profile.avatar_url user_info_dict["threepids"] = threepids user_info_dict["external_ids"] = external_ids - user_info_dict["erased"] = await self.store.is_user_erased(user.to_string()) + user_info_dict["erased"] = await self._store.is_user_erased(user.to_string()) return user_info_dict @@ -117,7 +117,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> The returned value is that returned by `writer.finished()`. """ # Get all rooms the user is in or has been in - rooms = await self.store.get_rooms_for_local_user_where_membership_is( + rooms = await self._store.get_rooms_for_local_user_where_membership_is( user_id, membership_list=( Membership.JOIN, @@ -131,7 +131,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> # We only try and fetch events for rooms the user has been in. If # they've been e.g. invited to a room without joining then we handle # those separately. 
- rooms_user_has_been_in = await self.store.get_rooms_user_has_been_in(user_id) + rooms_user_has_been_in = await self._store.get_rooms_user_has_been_in(user_id) for index, room in enumerate(rooms): room_id = room.room_id @@ -140,7 +140,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> "[%s] Handling room %s, %d/%d", user_id, room_id, index + 1, len(rooms) ) - forgotten = await self.store.did_forget(user_id, room_id) + forgotten = await self._store.did_forget(user_id, room_id) if forgotten: logger.info("[%s] User forgot room %d, ignoring", user_id, room_id) continue @@ -152,14 +152,14 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> if room.membership == Membership.INVITE: event_id = room.event_id - invite = await self.store.get_event(event_id, allow_none=True) + invite = await self._store.get_event(event_id, allow_none=True) if invite: invited_state = invite.unsigned["invite_room_state"] writer.write_invite(room_id, invite, invited_state) if room.membership == Membership.KNOCK: event_id = room.event_id - knock = await self.store.get_event(event_id, allow_none=True) + knock = await self._store.get_event(event_id, allow_none=True) if knock: knock_state = knock.unsigned["knock_room_state"] writer.write_knock(room_id, knock, knock_state) @@ -170,7 +170,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> # were joined. We estimate that point by looking at the # stream_ordering of the last membership if it wasn't a join. if room.membership == Membership.JOIN: - stream_ordering = self.store.get_room_max_stream_ordering() + stream_ordering = self._store.get_room_max_stream_ordering() else: stream_ordering = room.stream_ordering @@ -197,7 +197,7 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> # events that we have and then filtering, this isn't the most # efficient method perhaps but it does guarantee we get everything. while True: - events, _ = await self.store.paginate_room_events( + events, _ = await self._store.paginate_room_events( room_id, from_key, to_key, limit=100, direction=Direction.FORWARDS ) if not events: @@ -263,6 +263,13 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> connections["devices"][""]["sessions"][0]["connections"] ) + # Get all account data the user has global and in rooms + global_data = await self._store.get_global_account_data_for_user(user_id) + by_room_data = await self._store.get_room_account_data_for_user(user_id) + writer.write_account_data("global", global_data) + for room_id in by_room_data: + writer.write_account_data(room_id, by_room_data[room_id]) + return writer.finished() @@ -340,6 +347,18 @@ def write_connections(self, connections: List[JsonDict]) -> None: """ raise NotImplementedError() + @abc.abstractmethod + def write_account_data( + self, file_name: str, account_data: Mapping[str, JsonDict] + ) -> None: + """Write the account data of a user. + + Args: + file_name: file name to write data + account_data: mapping of global or room account_data + """ + raise NotImplementedError() + @abc.abstractmethod def finished(self) -> Any: """Called when all data has successfully been exported and written. 
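To illustrate the call pattern introduced above: `export_user_data` now calls `writer.write_account_data` once with the literal file name "global" and then once per room that has account data. The sketch below is a hypothetical in-memory writer used only to show that flow; it does not implement the full `ExfiltrationWriter` interface (which also requires the other abstract methods such as `write_connections` and `finished`), and `JsonDict` is stubbed locally so the snippet stands alone.

from typing import Dict, Mapping

JsonDict = Dict[str, object]  # local stand-in for synapse.types.JsonDict


class AccountDataCollector:
    """Hypothetical writer that keeps exported account data in memory."""

    def __init__(self) -> None:
        # Keyed by "global" or by room ID, mirroring the directory layout
        # documented in admin_faq.md above.
        self.account_data: Dict[str, Mapping[str, JsonDict]] = {}

    def write_account_data(
        self, file_name: str, account_data: Mapping[str, JsonDict]
    ) -> None:
        self.account_data[file_name] = account_data


# Expected calls, in the order export_user_data makes them:
#   collector.write_account_data("global", global_data)
#   collector.write_account_data(room_id, by_room_data[room_id])  # once per room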
diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index 6f300b8e1119..1b97aaeed134 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -296,3 +296,30 @@ def test_connections(self) -> None: self.assertEqual(args[0][0]["user_agent"], "user_agent") self.assertGreater(args[0][0]["last_seen"], 0) self.assertNotIn("access_token", args[0][0]) + + def test_account_data(self) -> None: + """Tests that user account data get exported.""" + # add account data + self.get_success( + self._store.add_account_data_for_user(self.user2, "m.global", {"a": 1}) + ) + self.get_success( + self._store.add_account_data_to_room( + self.user2, "test_room", "m.per_room", {"b": 2} + ) + ) + + writer = Mock() + + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) + + # two calls, one call for user data and one call for room data + writer.write_account_data.assert_called() + + args = writer.write_account_data.call_args_list[0][0] + self.assertEqual(args[0], "global") + self.assertEqual(args[1]["m.global"]["a"], 1) + + args = writer.write_account_data.call_args_list[1][0] + self.assertEqual(args[0], "test_room") + self.assertEqual(args[1]["m.per_room"]["b"], 2) From c9b9143655d39ef691bd21ebc03434feb0dc377f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 17 Feb 2023 13:19:38 -0500 Subject: [PATCH 206/344] Fix-up type hints in tests/server.py. (#15084) This file was being ignored by mypy, we remove that and add the missing type hints & deal with any fallout. --- changelog.d/15084.misc | 1 + mypy.ini | 2 - tests/appservice/test_scheduler.py | 6 +- .../test_matrix_federation_agent.py | 5 +- tests/http/test_proxyagent.py | 5 +- tests/rest/client/test_auth.py | 14 +- tests/rest/client/utils.py | 58 ++-- tests/server.py | 253 ++++++++++++------ tests/unittest.py | 11 +- 9 files changed, 226 insertions(+), 129 deletions(-) create mode 100644 changelog.d/15084.misc diff --git a/changelog.d/15084.misc b/changelog.d/15084.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15084.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index ff6e04b12fe0..94562d0bce10 100644 --- a/mypy.ini +++ b/mypy.ini @@ -31,8 +31,6 @@ exclude = (?x) |synapse/storage/databases/__init__.py |synapse/storage/databases/main/cache.py |synapse/storage/schema/ - - |tests/server.py )$ [mypy-synapse.federation.transport.client] diff --git a/tests/appservice/test_scheduler.py b/tests/appservice/test_scheduler.py index febcc1499d06..e2a3bad065da 100644 --- a/tests/appservice/test_scheduler.py +++ b/tests/appservice/test_scheduler.py @@ -11,12 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import TYPE_CHECKING, List, Optional, Sequence, Tuple, cast +from typing import List, Optional, Sequence, Tuple, cast from unittest.mock import Mock from typing_extensions import TypeAlias from twisted.internet import defer +from twisted.test.proto_helpers import MemoryReactor from synapse.appservice import ( ApplicationService, @@ -40,9 +41,6 @@ from ..utils import MockClock -if TYPE_CHECKING: - from twisted.internet.testing import MemoryReactor - class ApplicationServiceSchedulerTransactionCtrlTestCase(unittest.TestCase): def setUp(self) -> None: diff --git a/tests/http/federation/test_matrix_federation_agent.py b/tests/http/federation/test_matrix_federation_agent.py index d27422515c8f..eb7f53fee502 100644 --- a/tests/http/federation/test_matrix_federation_agent.py +++ b/tests/http/federation/test_matrix_federation_agent.py @@ -30,7 +30,7 @@ IOpenSSLClientConnectionCreator, IProtocolFactory, ) -from twisted.internet.protocol import Factory +from twisted.internet.protocol import Factory, Protocol from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web._newclient import ResponseNeverReceived from twisted.web.client import Agent @@ -466,7 +466,8 @@ def _do_get_via_proxy( else: assert isinstance(proxy_server_transport, FakeTransport) client_protocol = proxy_server_transport.other - c2s_transport = client_protocol.transport + assert isinstance(client_protocol, Protocol) + c2s_transport = checked_cast(FakeTransport, client_protocol.transport) c2s_transport.other = server_ssl_protocol self.reactor.advance(0) diff --git a/tests/http/test_proxyagent.py b/tests/http/test_proxyagent.py index 22fdc7f5f23f..cc175052ac78 100644 --- a/tests/http/test_proxyagent.py +++ b/tests/http/test_proxyagent.py @@ -28,7 +28,7 @@ _WrappingProtocol, ) from twisted.internet.interfaces import IProtocol, IProtocolFactory -from twisted.internet.protocol import Factory +from twisted.internet.protocol import Factory, Protocol from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol from twisted.web.http import HTTPChannel @@ -644,7 +644,8 @@ def _do_https_request_via_proxy( else: assert isinstance(proxy_server_transport, FakeTransport) client_protocol = proxy_server_transport.other - c2s_transport = client_protocol.transport + assert isinstance(client_protocol, Protocol) + c2s_transport = checked_cast(FakeTransport, client_protocol.transport) c2s_transport.other = server_ssl_protocol self.reactor.advance(0) diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index f4e1e7de4352..a1446100780c 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -34,7 +34,7 @@ from tests import unittest from tests.handlers.test_oidc import HAS_OIDC from tests.rest.client.utils import TEST_OIDC_CONFIG, TEST_OIDC_ISSUER -from tests.server import FakeChannel, make_request +from tests.server import FakeChannel from tests.unittest import override_config, skip_unless @@ -1322,16 +1322,8 @@ def test_logout_during_login(self) -> None: channel = self.submit_logout_token(logout_token) self.assertEqual(channel.code, 200) - # Now try to exchange the login token - channel = make_request( - self.hs.get_reactor(), - self.site, - "POST", - "/login", - content={"type": "m.login.token", "token": login_token}, - ) - # It should have failed - self.assertEqual(channel.code, 403) + # Now try to exchange the login token, it should fail. 
+ self.helper.login_via_token(login_token, 403) @override_config( { diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index 8d6f2b6ff9cc..9532e5ddc102 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -36,6 +36,7 @@ import attr from typing_extensions import Literal +from twisted.test.proto_helpers import MemoryReactorClock from twisted.web.resource import Resource from twisted.web.server import Site @@ -67,6 +68,7 @@ class RestHelper: """ hs: HomeServer + reactor: MemoryReactorClock site: Site auth_user_id: Optional[str] @@ -142,7 +144,7 @@ def create_room_as( path = path + "?access_token=%s" % tok channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "POST", path, @@ -216,7 +218,7 @@ def knock( data["reason"] = reason channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "POST", path, @@ -313,7 +315,7 @@ def change_membership( data.update(extra_data or {}) channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "PUT", path, @@ -394,7 +396,7 @@ def send_event( path = path + "?access_token=%s" % tok channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "PUT", path, @@ -433,7 +435,7 @@ def get_event( path = path + f"?access_token={tok}" channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", path, @@ -488,7 +490,7 @@ def _read_write_state( if body is not None: content = json.dumps(body).encode("utf8") - channel = make_request(self.hs.get_reactor(), self.site, method, path, content) + channel = make_request(self.reactor, self.site, method, path, content) assert channel.code == expect_code, "Expected: %d, got: %d, resp: %r" % ( expect_code, @@ -573,8 +575,8 @@ def upload_media( image_length = len(image_data) path = "/_matrix/media/r0/upload?filename=%s" % (filename,) channel = make_request( - self.hs.get_reactor(), - FakeSite(resource, self.hs.get_reactor()), + self.reactor, + FakeSite(resource, self.reactor), "POST", path, content=image_data, @@ -603,7 +605,7 @@ def whoami( expect_code: The return code to expect from attempting the whoami request """ channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", "account/whoami", @@ -642,7 +644,7 @@ def login_via_oidc( ) -> Tuple[JsonDict, FakeAuthorizationGrant]: """Log in (as a new user) via OIDC - Returns the result of the final token login. + Returns the result of the final token login and the fake authorization grant. Requires that "oidc_config" in the homeserver config be set appropriately (TEST_OIDC_CONFIG is a suitable example) - and by implication, needs a @@ -672,10 +674,28 @@ def login_via_oidc( assert m, channel.text_body login_token = m.group(1) - # finally, submit the matrix login token to the login API, which gives us our - # matrix access token and device id. + return self.login_via_token(login_token, expected_status), grant + + def login_via_token( + self, + login_token: str, + expected_status: int = 200, + ) -> JsonDict: + """Submit the matrix login token to the login API, which gives us our + matrix access token and device id.Log in (as a new user) via OIDC + + Returns the result of the token login. + + Requires that "oidc_config" in the homeserver config be set appropriately + (TEST_OIDC_CONFIG is a suitable example) - and by implication, needs a + "public_base_url". + + Also requires the login servlet and the OIDC callback resource to be mounted at + the normal places. 
+ """ + channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "POST", "/login", @@ -684,7 +704,7 @@ def login_via_oidc( assert ( channel.code == expected_status ), f"unexpected status in response: {channel.code}" - return channel.json_body, grant + return channel.json_body def auth_via_oidc( self, @@ -805,7 +825,7 @@ def complete_oidc_auth( with fake_serer.patch_homeserver(hs=self.hs): # now hit the callback URI with the right params and a made-up code channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", callback_uri, @@ -849,7 +869,7 @@ def initiate_sso_login( # is the easiest way of figuring out what the Host header ought to be set to # to keep Synapse happy. channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", uri, @@ -867,7 +887,7 @@ def get_location(channel: FakeChannel) -> str: location = get_location(channel) parts = urllib.parse.urlsplit(location) channel = make_request( - self.hs.get_reactor(), + self.reactor, self.site, "GET", urllib.parse.urlunsplit(("", "") + parts[2:]), @@ -900,9 +920,7 @@ def initiate_sso_ui_auth( + urllib.parse.urlencode({"session": ui_auth_session_id}) ) # hit the redirect url (which will issue a cookie and state) - channel = make_request( - self.hs.get_reactor(), self.site, "GET", sso_redirect_endpoint - ) + channel = make_request(self.reactor, self.site, "GET", sso_redirect_endpoint) # that should serve a confirmation page assert channel.code == HTTPStatus.OK, channel.text_body channel.extract_cookies(cookies) diff --git a/tests/server.py b/tests/server.py index 237bcad8ba6f..5de97227669c 100644 --- a/tests/server.py +++ b/tests/server.py @@ -22,20 +22,25 @@ from collections import deque from io import SEEK_END, BytesIO from typing import ( + Any, + Awaitable, Callable, Dict, Iterable, List, MutableMapping, Optional, + Sequence, Tuple, Type, + TypeVar, Union, + cast, ) from unittest.mock import Mock import attr -from typing_extensions import Deque +from typing_extensions import Deque, ParamSpec from zope.interface import implementer from twisted.internet import address, threads, udp @@ -44,8 +49,10 @@ from twisted.internet.error import DNSLookupError from twisted.internet.interfaces import ( IAddress, + IConnector, IConsumer, IHostnameResolver, + IProducer, IProtocol, IPullProducer, IPushProducer, @@ -54,6 +61,8 @@ IResolverSimple, ITransport, ) +from twisted.internet.protocol import ClientFactory, DatagramProtocol +from twisted.python import threadpool from twisted.python.failure import Failure from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock from twisted.web.http_headers import Headers @@ -61,6 +70,7 @@ from twisted.web.server import Request, Site from synapse.config.database import DatabaseConnectionConfig +from synapse.config.homeserver import HomeServerConfig from synapse.events.presence_router import load_legacy_presence_router from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.events.third_party_rules import load_legacy_third_party_event_rules @@ -88,6 +98,9 @@ logger = logging.getLogger(__name__) +R = TypeVar("R") +P = ParamSpec("P") + # the type of thing that can be passed into `make_request` in the headers list CustomHeaderType = Tuple[Union[str, bytes], Union[str, bytes]] @@ -98,12 +111,14 @@ class TimedOutException(Exception): """ -@implementer(IConsumer) +@implementer(ITransport, IPushProducer, IConsumer) @attr.s(auto_attribs=True) class FakeChannel: """ A fake Twisted Web Channel (the part that interfaces 
with the wire). + + See twisted.web.http.HTTPChannel. """ site: Union[Site, "FakeSite"] @@ -142,7 +157,7 @@ def text_body(self) -> str: Raises an exception if the request has not yet completed. """ - if not self.is_finished: + if not self.is_finished(): raise Exception("Request not yet completed") return self.result["body"].decode("utf8") @@ -165,27 +180,36 @@ def headers(self) -> Headers: h.addRawHeader(*i) return h - def writeHeaders(self, version, code, reason, headers): + def writeHeaders( + self, version: bytes, code: bytes, reason: bytes, headers: Headers + ) -> None: self.result["version"] = version self.result["code"] = code self.result["reason"] = reason self.result["headers"] = headers - def write(self, content: bytes) -> None: - assert isinstance(content, bytes), "Should be bytes! " + repr(content) + def write(self, data: bytes) -> None: + assert isinstance(data, bytes), "Should be bytes! " + repr(data) if "body" not in self.result: self.result["body"] = b"" - self.result["body"] += content + self.result["body"] += data + + def writeSequence(self, data: Iterable[bytes]) -> None: + for x in data: + self.write(x) + + def loseConnection(self) -> None: + self.unregisterProducer() + self.transport.loseConnection() # Type ignore: mypy doesn't like the fact that producer isn't an IProducer. - def registerProducer( # type: ignore[override] - self, - producer: Union[IPullProducer, IPushProducer], - streaming: bool, - ) -> None: - self._producer = producer + def registerProducer(self, producer: IProducer, streaming: bool) -> None: + # TODO This should ensure that the IProducer is an IPushProducer or + # IPullProducer, unfortunately twisted.protocols.basic.FileSender does + # implement those, but doesn't declare it. + self._producer = cast(Union[IPushProducer, IPullProducer], producer) self.producerStreaming = streaming def _produce() -> None: @@ -202,6 +226,16 @@ def unregisterProducer(self) -> None: self._producer = None + def stopProducing(self) -> None: + if self._producer is not None: + self._producer.stopProducing() + + def pauseProducing(self) -> None: + raise NotImplementedError() + + def resumeProducing(self) -> None: + raise NotImplementedError() + def requestDone(self, _self: Request) -> None: self.result["done"] = True if isinstance(_self, SynapseRequest): @@ -281,12 +315,12 @@ def __init__( self.reactor = reactor self.experimental_cors_msc3886 = experimental_cors_msc3886 - def getResourceFor(self, request): + def getResourceFor(self, request: Request) -> IResource: return self._resource def make_request( - reactor, + reactor: MemoryReactorClock, site: Union[Site, FakeSite], method: Union[bytes, str], path: Union[bytes, str], @@ -409,19 +443,21 @@ class ThreadedMemoryReactorClock(MemoryReactorClock): A MemoryReactorClock that supports callFromThread. 
""" - def __init__(self): + def __init__(self) -> None: self.threadpool = ThreadPool(self) self._tcp_callbacks: Dict[Tuple[str, int], Callable] = {} - self._udp = [] + self._udp: List[udp.Port] = [] self.lookups: Dict[str, str] = {} - self._thread_callbacks: Deque[Callable[[], None]] = deque() + self._thread_callbacks: Deque[Callable[..., R]] = deque() lookups = self.lookups @implementer(IResolverSimple) class FakeResolver: - def getHostByName(self, name, timeout=None): + def getHostByName( + self, name: str, timeout: Optional[Sequence[int]] = None + ) -> "Deferred[str]": if name not in lookups: return fail(DNSLookupError("OH NO: unknown %s" % (name,))) return succeed(lookups[name]) @@ -432,25 +468,44 @@ def getHostByName(self, name, timeout=None): def installNameResolver(self, resolver: IHostnameResolver) -> IHostnameResolver: raise NotImplementedError() - def listenUDP(self, port, protocol, interface="", maxPacketSize=8196): + def listenUDP( + self, + port: int, + protocol: DatagramProtocol, + interface: str = "", + maxPacketSize: int = 8196, + ) -> udp.Port: p = udp.Port(port, protocol, interface, maxPacketSize, self) p.startListening() self._udp.append(p) return p - def callFromThread(self, callback, *args, **kwargs): + def callFromThread( + self, callable: Callable[..., Any], *args: object, **kwargs: object + ) -> None: """ Make the callback fire in the next reactor iteration. """ - cb = lambda: callback(*args, **kwargs) + cb = lambda: callable(*args, **kwargs) # it's not safe to call callLater() here, so we append the callback to a # separate queue. self._thread_callbacks.append(cb) - def getThreadPool(self): - return self.threadpool + def callInThread( + self, callable: Callable[..., Any], *args: object, **kwargs: object + ) -> None: + raise NotImplementedError() + + def suggestThreadPoolSize(self, size: int) -> None: + raise NotImplementedError() + + def getThreadPool(self) -> "threadpool.ThreadPool": + # Cast to match super-class. + return cast(threadpool.ThreadPool, self.threadpool) - def add_tcp_client_callback(self, host: str, port: int, callback: Callable): + def add_tcp_client_callback( + self, host: str, port: int, callback: Callable[[], None] + ) -> None: """Add a callback that will be invoked when we receive a connection attempt to the given IP/port using `connectTCP`. @@ -459,7 +514,14 @@ def add_tcp_client_callback(self, host: str, port: int, callback: Callable): """ self._tcp_callbacks[(host, port)] = callback - def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None): + def connectTCP( + self, + host: str, + port: int, + factory: ClientFactory, + timeout: float = 30, + bindAddress: Optional[Tuple[str, int]] = None, + ) -> IConnector: """Fake L{IReactorTCP.connectTCP}.""" conn = super().connectTCP( @@ -472,7 +534,7 @@ def connectTCP(self, host: str, port: int, factory, timeout=30, bindAddress=None return conn - def advance(self, amount): + def advance(self, amount: float) -> None: # first advance our reactor's time, and run any "callLater" callbacks that # makes ready super().advance(amount) @@ -500,25 +562,33 @@ def advance(self, amount): class ThreadPool: """ Threadless thread pool. 
+ + See twisted.python.threadpool.ThreadPool """ - def __init__(self, reactor): + def __init__(self, reactor: IReactorTime): self._reactor = reactor - def start(self): + def start(self) -> None: pass - def stop(self): + def stop(self) -> None: pass - def callInThreadWithCallback(self, onResult, function, *args, **kwargs): - def _(res): + def callInThreadWithCallback( + self, + onResult: Callable[[bool, Union[Failure, R]], None], + function: Callable[P, R], + *args: P.args, + **kwargs: P.kwargs, + ) -> "Deferred[None]": + def _(res: Any) -> None: if isinstance(res, Failure): onResult(False, res) else: onResult(True, res) - d = Deferred() + d: "Deferred[None]" = Deferred() d.addCallback(lambda x: function(*args, **kwargs)) d.addBoth(_) self._reactor.callLater(0, d.callback, True) @@ -535,7 +605,9 @@ def _make_test_homeserver_synchronous(server: HomeServer) -> None: for database in server.get_datastores().databases: pool = database._db_pool - def runWithConnection(func, *args, **kwargs): + def runWithConnection( + func: Callable[..., R], *args: Any, **kwargs: Any + ) -> Awaitable[R]: return threads.deferToThreadPool( pool._reactor, pool.threadpool, @@ -545,20 +617,23 @@ def runWithConnection(func, *args, **kwargs): **kwargs, ) - def runInteraction(interaction, *args, **kwargs): + def runInteraction( + desc: str, func: Callable[..., R], *args: Any, **kwargs: Any + ) -> Awaitable[R]: return threads.deferToThreadPool( pool._reactor, pool.threadpool, pool._runInteraction, - interaction, + desc, + func, *args, **kwargs, ) - pool.runWithConnection = runWithConnection - pool.runInteraction = runInteraction + pool.runWithConnection = runWithConnection # type: ignore[assignment] + pool.runInteraction = runInteraction # type: ignore[assignment] # Replace the thread pool with a threadless 'thread' pool - pool.threadpool = ThreadPool(clock._reactor) + pool.threadpool = ThreadPool(clock._reactor) # type: ignore[assignment] pool.running = True # We've just changed the Databases to run DB transactions on the same @@ -573,7 +648,7 @@ def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: @implementer(ITransport) -@attr.s(cmp=False) +@attr.s(cmp=False, auto_attribs=True) class FakeTransport: """ A twisted.internet.interfaces.ITransport implementation which sends all its data @@ -588,48 +663,50 @@ class FakeTransport: If you want bidirectional communication, you'll need two instances. """ - other = attr.ib() + other: IProtocol """The Protocol object which will receive any data written to this transport. - - :type: twisted.internet.interfaces.IProtocol """ - _reactor = attr.ib() + _reactor: IReactorTime """Test reactor - - :type: twisted.internet.interfaces.IReactorTime """ - _protocol = attr.ib(default=None) + _protocol: Optional[IProtocol] = None """The Protocol which is producing data for this transport. Optional, but if set will get called back for connectionLost() notifications etc. 
""" - _peer_address: Optional[IAddress] = attr.ib(default=None) + _peer_address: IAddress = attr.Factory( + lambda: address.IPv4Address("TCP", "127.0.0.1", 5678) + ) """The value to be returned by getPeer""" - _host_address: Optional[IAddress] = attr.ib(default=None) + _host_address: IAddress = attr.Factory( + lambda: address.IPv4Address("TCP", "127.0.0.1", 1234) + ) """The value to be returned by getHost""" disconnecting = False disconnected = False connected = True - buffer = attr.ib(default=b"") - producer = attr.ib(default=None) - autoflush = attr.ib(default=True) + buffer: bytes = b"" + producer: Optional[IPushProducer] = None + autoflush: bool = True - def getPeer(self) -> Optional[IAddress]: + def getPeer(self) -> IAddress: return self._peer_address - def getHost(self) -> Optional[IAddress]: + def getHost(self) -> IAddress: return self._host_address - def loseConnection(self, reason=None): + def loseConnection(self) -> None: if not self.disconnecting: - logger.info("FakeTransport: loseConnection(%s)", reason) + logger.info("FakeTransport: loseConnection()") self.disconnecting = True if self._protocol: - self._protocol.connectionLost(reason) + self._protocol.connectionLost( + Failure(RuntimeError("FakeTransport.loseConnection()")) + ) # if we still have data to write, delay until that is done if self.buffer: @@ -640,38 +717,38 @@ def loseConnection(self, reason=None): self.connected = False self.disconnected = True - def abortConnection(self): + def abortConnection(self) -> None: logger.info("FakeTransport: abortConnection()") if not self.disconnecting: self.disconnecting = True if self._protocol: - self._protocol.connectionLost(None) + self._protocol.connectionLost(None) # type: ignore[arg-type] self.disconnected = True - def pauseProducing(self): + def pauseProducing(self) -> None: if not self.producer: return self.producer.pauseProducing() - def resumeProducing(self): + def resumeProducing(self) -> None: if not self.producer: return self.producer.resumeProducing() - def unregisterProducer(self): + def unregisterProducer(self) -> None: if not self.producer: return self.producer = None - def registerProducer(self, producer, streaming): + def registerProducer(self, producer: IPushProducer, streaming: bool) -> None: self.producer = producer self.producerStreaming = streaming - def _produce(): + def _produce() -> None: if not self.producer: # we've been unregistered return @@ -683,7 +760,7 @@ def _produce(): if not streaming: self._reactor.callLater(0.0, _produce) - def write(self, byt): + def write(self, byt: bytes) -> None: if self.disconnecting: raise Exception("Writing to disconnecting FakeTransport") @@ -695,11 +772,11 @@ def write(self, byt): if self.autoflush: self._reactor.callLater(0.0, self.flush) - def writeSequence(self, seq): + def writeSequence(self, seq: Iterable[bytes]) -> None: for x in seq: self.write(x) - def flush(self, maxbytes=None): + def flush(self, maxbytes: Optional[int] = None) -> None: if not self.buffer: # nothing to do. 
Don't write empty buffers: it upsets the # TLSMemoryBIOProtocol @@ -750,17 +827,17 @@ def connect_client( class TestHomeServer(HomeServer): - DATASTORE_CLASS = DataStore + DATASTORE_CLASS = DataStore # type: ignore[assignment] def setup_test_homeserver( - cleanup_func, - name="test", - config=None, - reactor=None, + cleanup_func: Callable[[Callable[[], None]], None], + name: str = "test", + config: Optional[HomeServerConfig] = None, + reactor: Optional[ISynapseReactor] = None, homeserver_to_use: Type[HomeServer] = TestHomeServer, - **kwargs, -): + **kwargs: Any, +) -> HomeServer: """ Setup a homeserver suitable for running tests against. Keyword arguments are passed to the Homeserver constructor. @@ -775,13 +852,14 @@ def setup_test_homeserver( HomeserverTestCase. """ if reactor is None: - from twisted.internet import reactor + from twisted.internet import reactor as _reactor + + reactor = cast(ISynapseReactor, _reactor) if config is None: config = default_config(name, parse=True) config.caches.resize_all_caches() - config.ldap_enabled = False if "clock" not in kwargs: kwargs["clock"] = MockClock() @@ -832,6 +910,8 @@ def setup_test_homeserver( # Create the database before we actually try and connect to it, based off # the template database we generate in setupdb() if isinstance(db_engine, PostgresEngine): + import psycopg2.extensions + db_conn = db_engine.module.connect( database=POSTGRES_BASE_DB, user=POSTGRES_USER, @@ -839,6 +919,7 @@ def setup_test_homeserver( port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) + assert isinstance(db_conn, psycopg2.extensions.connection) db_conn.autocommit = True cur = db_conn.cursor() cur.execute("DROP DATABASE IF EXISTS %s;" % (test_db,)) @@ -867,14 +948,15 @@ def setup_test_homeserver( hs.setup_background_tasks() if isinstance(db_engine, PostgresEngine): - database = hs.get_datastores().databases[0] + database_pool = hs.get_datastores().databases[0] # We need to do cleanup on PostgreSQL - def cleanup(): + def cleanup() -> None: import psycopg2 + import psycopg2.extensions # Close all the db pools - database._db_pool.close() + database_pool._db_pool.close() dropped = False @@ -886,6 +968,7 @@ def cleanup(): port=POSTGRES_PORT, password=POSTGRES_PASSWORD, ) + assert isinstance(db_conn, psycopg2.extensions.connection) db_conn.autocommit = True cur = db_conn.cursor() @@ -918,23 +1001,23 @@ def cleanup(): # Need to let the HS build an auth handler and then mess with it # because AuthHandler's constructor requires the HS, so we can't make one # beforehand and pass it in to the HS's constructor (chicken / egg) - async def hash(p): + async def hash(p: str) -> str: return hashlib.md5(p.encode("utf8")).hexdigest() - hs.get_auth_handler().hash = hash + hs.get_auth_handler().hash = hash # type: ignore[assignment] - async def validate_hash(p, h): + async def validate_hash(p: str, h: str) -> bool: return hashlib.md5(p.encode("utf8")).hexdigest() == h - hs.get_auth_handler().validate_hash = validate_hash + hs.get_auth_handler().validate_hash = validate_hash # type: ignore[assignment] # Make the threadpool and database transactions synchronous for testing. 
_make_test_homeserver_synchronous(hs) # Load any configured modules into the homeserver module_api = hs.get_module_api() - for module, config in hs.config.modules.loaded_modules: - module(config=config, api=module_api) + for module, module_config in hs.config.modules.loaded_modules: + module(config=module_config, api=module_api) load_legacy_spam_checkers(hs) load_legacy_third_party_event_rules(hs) diff --git a/tests/unittest.py b/tests/unittest.py index c1cb5933faed..b21e7f122196 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -45,7 +45,7 @@ from twisted.internet.defer import Deferred, ensureDeferred from twisted.python.failure import Failure from twisted.python.threadpool import ThreadPool -from twisted.test.proto_helpers import MemoryReactor +from twisted.test.proto_helpers import MemoryReactor, MemoryReactorClock from twisted.trial import unittest from twisted.web.resource import Resource from twisted.web.server import Request @@ -82,7 +82,7 @@ ) from tests.test_utils import event_injection, setup_awaitable_errors from tests.test_utils.logging_setup import setup_logging -from tests.utils import default_config, setupdb +from tests.utils import checked_cast, default_config, setupdb setupdb() setup_logging() @@ -296,7 +296,12 @@ def setUp(self) -> None: from tests.rest.client.utils import RestHelper - self.helper = RestHelper(self.hs, self.site, getattr(self, "user_id", None)) + self.helper = RestHelper( + self.hs, + checked_cast(MemoryReactorClock, self.hs.get_reactor()), + self.site, + getattr(self, "user_id", None), + ) if hasattr(self, "user_id"): if self.hijack_auth: From 77157f21ebc6877827c63ec748cd9186ef04e9d2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:13:36 +0000 Subject: [PATCH 207/344] Bump types-jsonschema from 4.17.0.3 to 4.17.0.5 (#15099) * Bump types-jsonschema from 4.17.0.3 to 4.17.0.5 Bumps [types-jsonschema](https://github.com/python/typeshed) from 4.17.0.3 to 4.17.0.5. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-jsonschema dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15099.misc | 1 + poetry.lock | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15099.misc diff --git a/changelog.d/15099.misc b/changelog.d/15099.misc new file mode 100644 index 000000000000..53ed621ccf0e --- /dev/null +++ b/changelog.d/15099.misc @@ -0,0 +1 @@ +Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. 
diff --git a/poetry.lock b/poetry.lock index eb1e3d797bd6..20319fb7c65f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1708,7 +1708,7 @@ files = [ cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] @@ -2624,14 +2624,14 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.17.0.3" +version = "4.17.0.5" description = "Typing stubs for jsonschema" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-jsonschema-4.17.0.3.tar.gz", hash = "sha256:746aa466ffed9a1acc7bdbd0ac0b5e068f00be2ee008c1d1e14b0944a8c8b24b"}, - {file = "types_jsonschema-4.17.0.3-py3-none-any.whl", hash = "sha256:c8d5b26b7c8da6a48d7fb1ce029b97e0ff6e74db3727efb968c69f39ad013685"}, + {file = "types-jsonschema-4.17.0.5.tar.gz", hash = "sha256:7adc7bfca4afe291de0c93eca9367aa72a4fbe8ce87fe15642c600ad97d45dd6"}, + {file = "types_jsonschema-4.17.0.5-py3-none-any.whl", hash = "sha256:79ac8a7763fe728947af90a24168b91621edf7e8425bf3670abd4ea0d4758fba"}, ] [[package]] From e9d01ff3b8fd8567f3a51f9bdc361759abb3b578 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:13:47 +0000 Subject: [PATCH 208/344] Bump types-bleach from 5.0.3.1 to 6.0.0.0 (#15100) * Bump types-bleach from 5.0.3.1 to 6.0.0.0 Bumps [types-bleach](https://github.com/python/typeshed) from 5.0.3.1 to 6.0.0.0. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-bleach dependency-type: direct:development update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15100.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15100.misc diff --git a/changelog.d/15100.misc b/changelog.d/15100.misc new file mode 100644 index 000000000000..035d09454916 --- /dev/null +++ b/changelog.d/15100.misc @@ -0,0 +1 @@ +Bump types-bleach from 5.0.3.1 to 6.0.0.0. 
diff --git a/poetry.lock b/poetry.lock index 20319fb7c65f..da303676c4c2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2548,14 +2548,14 @@ files = [ [[package]] name = "types-bleach" -version = "5.0.3.1" +version = "6.0.0.0" description = "Typing stubs for bleach" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-bleach-5.0.3.1.tar.gz", hash = "sha256:ce8772ea5126dab1883851b41e3aeff229aa5213ced36096990344e632e92373"}, - {file = "types_bleach-5.0.3.1-py3-none-any.whl", hash = "sha256:af5f1b3a54ff279f54c29eccb2e6988ebb6718bc4061469588a5fd4880a79287"}, + {file = "types-bleach-6.0.0.0.tar.gz", hash = "sha256:770ce9c7ea6173743ef1a4a70f2619bb1819bf53c7cd0336d939af93f488fbe2"}, + {file = "types_bleach-6.0.0.0-py3-none-any.whl", hash = "sha256:75f55f035837c5fce2cd0bd5162a2a90057680a89c9275588a5c12f5f597a14a"}, ] [[package]] From 349c3a4feec096ee548be2611dbec60300cb2bfb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:13:58 +0000 Subject: [PATCH 209/344] Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295 (#15101) * Bump dtolnay/rust-toolchain Bumps [dtolnay/rust-toolchain](https://github.com/dtolnay/rust-toolchain) from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. - [Release notes](https://github.com/dtolnay/rust-toolchain/releases) - [Commits](https://github.com/dtolnay/rust-toolchain/compare/25dc93b901a87e864900a8aec6c12e9aa794c0c3...e12eda571dc9a5ee5d58eecf4738ec291c66f295) --- updated-dependencies: - dependency-name: dtolnay/rust-toolchain dependency-type: direct:production ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/latest_deps.yml | 6 +++--- .github/workflows/tests.yml | 18 +++++++++--------- .github/workflows/twisted_trunk.yml | 6 +++--- changelog.d/15101.misc | 1 + 4 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15101.misc diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 8485daf87ffe..6da7c22e4c33 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -61,7 +61,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -134,7 +134,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 94f7f2657c86..cfafeaadc93b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -112,7 +112,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly 
rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 components: clippy @@ -134,7 +134,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: nightly-2022-12-01 components: clippy @@ -154,7 +154,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 components: rustfmt @@ -221,7 +221,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -266,7 +266,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -386,7 +386,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -531,7 +531,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -562,7 +562,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: 1.58.1 - uses: Swatinem/rust-cache@v2 @@ -585,7 +585,7 @@ jobs: # There don't seem to be versioned releases of this action per se: for each rust # version there is a branch which gets constantly rebased on top of master. # We pin to a specific commit for paranoia's sake. 
- uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: nightly-2022-12-01 - uses: Swatinem/rust-cache@v2 diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 5654d2f3e2ad..db514571c4af 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -43,7 +43,7 @@ jobs: - run: sudo apt-get -qq install xmlsec1 - name: Install Rust - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 @@ -82,7 +82,7 @@ jobs: - uses: actions/checkout@v3 - name: Install Rust - uses: dtolnay/rust-toolchain@25dc93b901a87e864900a8aec6c12e9aa794c0c3 + uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: toolchain: stable - uses: Swatinem/rust-cache@v2 diff --git a/changelog.d/15101.misc b/changelog.d/15101.misc new file mode 100644 index 000000000000..807dbb554037 --- /dev/null +++ b/changelog.d/15101.misc @@ -0,0 +1 @@ +Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. From 46e9ce5424163856e34e02acb80f958484763a6f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:14:07 +0000 Subject: [PATCH 210/344] Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0 (#15102) * Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0 Bumps [dawidd6/action-download-artifact](https://github.com/dawidd6/action-download-artifact) from 2.24.3 to 2.25.0. - [Release notes](https://github.com/dawidd6/action-download-artifact/releases) - [Commits](https://github.com/dawidd6/action-download-artifact/compare/bd10f381a96414ce2b13a11bfa89902ba7cea07f...b59d8c6a6c5c6c6437954f470d963c0b20ea7415) --- updated-dependencies: - dependency-name: dawidd6/action-download-artifact dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs-pr-netlify.yaml | 2 +- changelog.d/15102.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15102.misc diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index ef7a38144e00..1704b3ce93ac 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@bd10f381a96414ce2b13a11bfa89902ba7cea07f # v2.24.3 + uses: dawidd6/action-download-artifact@b59d8c6a6c5c6c6437954f470d963c0b20ea7415 # v2.25.0 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} diff --git a/changelog.d/15102.misc b/changelog.d/15102.misc new file mode 100644 index 000000000000..bb6037d52e1b --- /dev/null +++ b/changelog.d/15102.misc @@ -0,0 +1 @@ +Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. From e38b8262fbd46e2a02d77ad9187b0c1c9c503f1e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:14:37 +0000 Subject: [PATCH 211/344] Bump types-pillow from 9.4.0.10 to 9.4.0.13 (#15104) * Bump types-pillow from 9.4.0.10 to 9.4.0.13 Bumps [types-pillow](https://github.com/python/typeshed) from 9.4.0.10 to 9.4.0.13. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-pillow dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15104.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15104.misc diff --git a/changelog.d/15104.misc b/changelog.d/15104.misc new file mode 100644 index 000000000000..e9fa8759263e --- /dev/null +++ b/changelog.d/15104.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.4.0.10 to 9.4.0.13. 
diff --git a/poetry.lock b/poetry.lock index da303676c4c2..f74a392d1bc9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2648,14 +2648,14 @@ files = [ [[package]] name = "types-pillow" -version = "9.4.0.10" +version = "9.4.0.13" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.4.0.10.tar.gz", hash = "sha256:341c2345610bba452d1724757c7b997a60f593cf003c101ba239db003a0ae389"}, - {file = "types_Pillow-9.4.0.10-py3-none-any.whl", hash = "sha256:302ce81cfb61aacc8983a3a2ec682cbef66522a2fe8e640f648ac2e3d6f6af53"}, + {file = "types-Pillow-9.4.0.13.tar.gz", hash = "sha256:4510aa98a28947bf63f2b29edebbd11b7cff8647d90b867cec9b3674c0a8c321"}, + {file = "types_Pillow-9.4.0.13-py3-none-any.whl", hash = "sha256:14a8a19021b8fe569a9fef9edc64a8d8a4aef340e38669d4fb3dc05cfd941130"}, ] [[package]] From 7ee7f493167d243aaf8adb2958edbc841060f2a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Feb 2023 08:14:48 +0000 Subject: [PATCH 212/344] Bump types-setuptools from 67.1.0.0 to 67.3.0.1 (#15105) * Bump types-setuptools from 67.1.0.0 to 67.3.0.1 Bumps [types-setuptools](https://github.com/python/typeshed) from 67.1.0.0 to 67.3.0.1. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15105.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15105.misc diff --git a/changelog.d/15105.misc b/changelog.d/15105.misc new file mode 100644 index 000000000000..5bce811d32f6 --- /dev/null +++ b/changelog.d/15105.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.1.0.0 to 67.3.0.1. diff --git a/poetry.lock b/poetry.lock index f74a392d1bc9..ffb230139c46 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2714,14 +2714,14 @@ types-urllib3 = "<1.27" [[package]] name = "types-setuptools" -version = "67.1.0.0" +version = "67.3.0.1" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.1.0.0.tar.gz", hash = "sha256:162a39d22e3a5eb802197c84f16b19e798101bbd33d9437837fbb45627da5627"}, - {file = "types_setuptools-67.1.0.0-py3-none-any.whl", hash = "sha256:5bd7a10d93e468bfcb10d24cb8ea5e12ac4f4ac91267293959001f1448cf0619"}, + {file = "types-setuptools-67.3.0.1.tar.gz", hash = "sha256:1a26d373036c720e566823b6edd664a2db4d138b6eeba856721ec1254203474f"}, + {file = "types_setuptools-67.3.0.1-py3-none-any.whl", hash = "sha256:a7e0f0816b5b449f5bcdc0efa43da91ff81dbe6941f293a6490d68a450e130a1"}, ] [package.dependencies] From 1cbc3f197cc1b9732649ffb769b05d90c0e904d7 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 20 Feb 2023 12:00:18 +0000 Subject: [PATCH 213/344] Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. 
(#15079) Co-authored-by: David Robertson --- changelog.d/15079.bugfix | 1 + .../storage/databases/main/user_directory.py | 24 +++++-- tests/handlers/test_user_directory.py | 7 +++ tests/storage/test_user_directory.py | 63 ++++++++++++++++++- 4 files changed, 90 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15079.bugfix diff --git a/changelog.d/15079.bugfix b/changelog.d/15079.bugfix new file mode 100644 index 000000000000..907892c1ef5f --- /dev/null +++ b/changelog.d/15079.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. \ No newline at end of file diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index f6a6fd4079ec..30af4b3b6cfe 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -918,11 +918,19 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]: We use this so that we can add prefix matching, which isn't something that is supported by default. """ - results = _parse_words(search_term) + escaped_words = [] + for word in _parse_words(search_term): + # Postgres tsvector and tsquery quoting rules: + # words potentially containing punctuation should be quoted + # and then existing quotes and backslashes should be doubled + # See: https://www.postgresql.org/docs/current/datatype-textsearch.html#DATATYPE-TSQUERY + + quoted_word = word.replace("'", "''").replace("\\", "\\\\") + escaped_words.append(f"'{quoted_word}'") - both = " & ".join("(%s:* | %s)" % (result, result) for result in results) - exact = " & ".join("%s" % (result,) for result in results) - prefix = " & ".join("%s:*" % (result,) for result in results) + both = " & ".join("(%s:* | %s)" % (word, word) for word in escaped_words) + exact = " & ".join("%s" % (word,) for word in escaped_words) + prefix = " & ".join("%s:*" % (word,) for word in escaped_words) return both, exact, prefix @@ -944,6 +952,14 @@ def _parse_words(search_term: str) -> List[str]: if USE_ICU: return _parse_words_with_icu(search_term) + return _parse_words_with_regex(search_term) + + +def _parse_words_with_regex(search_term: str) -> List[str]: + """ + Break down search term into words, when we don't have ICU available. + See: `_parse_words` + """ return re.findall(r"([\w\-]+)", search_term, re.UNICODE) diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index f65a68b9c2df..a02c1c62279a 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -192,6 +192,13 @@ def test_excludes_appservice_sender(self) -> None: self.helper.join(room, self.appservice.sender, tok=self.appservice.token) self._check_only_one_user_in_directory(user, room) + def test_search_term_with_colon_in_it_does_not_raise(self) -> None: + """ + Regression test: Test that search terms with colons in them are acceptable. + """ + u1 = self.register_user("user1", "pass") + self.get_success(self.handler.search_users(u1, "haha:paamayim-nekudotayim", 10)) + def test_user_not_in_users_table(self) -> None: """Unclear how it happens, but on matrix.org we've seen join events for users who aren't in the users table. 
Test that we don't fall over diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index f1ca523d23f9..2d169684cf2e 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -25,6 +25,11 @@ from synapse.server import HomeServer from synapse.storage import DataStore from synapse.storage.background_updates import _BackgroundUpdateHandler +from synapse.storage.databases.main import user_directory +from synapse.storage.databases.main.user_directory import ( + _parse_words_with_icu, + _parse_words_with_regex, +) from synapse.storage.roommember import ProfileInfo from synapse.util import Clock @@ -42,7 +47,7 @@ BOB = "@bob:b" BOBBY = "@bobby:a" # The localpart isn't 'Bela' on purpose so we can test looking up display names. -BELA = "@somenickname:a" +BELA = "@somenickname:example.org" class GetUserDirectoryTables: @@ -423,6 +428,8 @@ async def mocked_process_users(*args: Any, **kwargs: Any) -> int: class UserDirectoryStoreTestCase(HomeserverTestCase): + use_icu = False + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.store = hs.get_datastores().main @@ -434,6 +441,12 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.get_success(self.store.update_profile_in_user_dir(BELA, "Bela", None)) self.get_success(self.store.add_users_in_public_rooms("!room:id", (ALICE, BOB))) + self._restore_use_icu = user_directory.USE_ICU + user_directory.USE_ICU = self.use_icu + + def tearDown(self) -> None: + user_directory.USE_ICU = self._restore_use_icu + def test_search_user_dir(self) -> None: # normally when alice searches the directory she should just find # bob because bobby doesn't share a room with her. @@ -478,6 +491,26 @@ def test_search_user_dir_stop_words(self) -> None: {"user_id": BELA, "display_name": "Bela", "avatar_url": None}, ) + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_start_of_user_id(self) -> None: + """Tests that a user can look up another user by searching for the start + of their user ID. + """ + r = self.get_success(self.store.search_user_dir(ALICE, "somenickname:exa", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": BELA, "display_name": "Bela", "avatar_url": None}, + ) + + +class UserDirectoryStoreTestCaseWithIcu(UserDirectoryStoreTestCase): + use_icu = True + + if not icu: + skip = "Requires PyICU" + class UserDirectoryICUTestCase(HomeserverTestCase): if not icu: @@ -513,3 +546,31 @@ def test_icu_word_boundary(self) -> None: r["results"][0], {"user_id": ALICE, "display_name": display_name, "avatar_url": None}, ) + + def test_icu_word_boundary_punctuation(self) -> None: + """ + Tests the behaviour of punctuation with the ICU tokeniser. + + Seems to depend on underlying version of ICU. + """ + + # Note: either tokenisation is fine, because Postgres actually splits + # words itself afterwards. 
+ self.assertIn( + _parse_words_with_icu("lazy'fox jumped:over the.dog"), + ( + # ICU 66 on Ubuntu 20.04 + ["lazy'fox", "jumped", "over", "the", "dog"], + # ICU 70 on Ubuntu 22.04 + ["lazy'fox", "jumped:over", "the.dog"], + ), + ) + + def test_regex_word_boundary_punctuation(self) -> None: + """ + Tests the behaviour of punctuation with the non-ICU tokeniser + """ + self.assertEqual( + _parse_words_with_regex("lazy'fox jumped:over the.dog"), + ["lazy", "fox", "jumped", "over", "the", "dog"], + ) From 490a3675bd7225b5695e505fea225d7c30127551 Mon Sep 17 00:00:00 2001 From: realtyem Date: Mon, 20 Feb 2023 06:23:00 -0600 Subject: [PATCH 214/344] Allow health listener resource to load (#15096) * Allow health listener resource to load. * changelog * Update changelog.d/15096.bugfix --- changelog.d/15096.bugfix | 1 + synapse/config/server.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/15096.bugfix diff --git a/changelog.d/15096.bugfix b/changelog.d/15096.bugfix new file mode 100644 index 000000000000..09b4d861f8a5 --- /dev/null +++ b/changelog.d/15096.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. diff --git a/synapse/config/server.py b/synapse/config/server.py index ecdaa2d9dd24..d4ef9930b007 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -177,6 +177,7 @@ def generate_ip_set( "client", "consent", "federation", + "health", "keys", "media", "metrics", From e26d7d5ae786df8d9d9a4dbd0f734e5c2f08aafd Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 20 Feb 2023 13:35:24 +0000 Subject: [PATCH 215/344] Teach portdb about `un_partial_stated_event_stream` (#15108) * Sort BOOLEAN_COLUMNS and APPEND_ONLY_TABLES So I can see if a given table is present in logarithmic time, rather than linear. * Teach portdb about `un_partial_stated_event_streams` * Comments comments comments * Changelog --- changelog.d/15108.bugfix | 1 + synapse/_scripts/synapse_port_db.py | 85 ++++++++++++++++++----------- 2 files changed, 53 insertions(+), 33 deletions(-) create mode 100644 changelog.d/15108.bugfix diff --git a/changelog.d/15108.bugfix b/changelog.d/15108.bugfix new file mode 100644 index 000000000000..30af8b439d1b --- /dev/null +++ b/changelog.d/15108.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 5e137dbbf711..0d35e0af8fcd 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -94,61 +94,80 @@ logger = logging.getLogger("synapse_port_db") +# SQLite doesn't have a dedicated boolean type (it stores True/False as 1/0). This means +# portdb will read sqlite bools as integers, then try to insert them into postgres +# boolean columns---which fails. Lacking some Python-parseable metaschema, we must +# specify which integer columns should be inserted as booleans into postgres. 
BOOLEAN_COLUMNS = { - "events": ["processed", "outlier", "contains_url"], - "rooms": ["is_public", "has_auth_chain_index"], + "access_tokens": ["used"], + "account_validity": ["email_sent"], + "device_lists_changes_in_room": ["converted_to_destinations"], + "device_lists_outbound_pokes": ["sent"], + "devices": ["hidden"], + "e2e_fallback_keys_json": ["used"], + "e2e_room_keys": ["is_verified"], "event_edges": ["is_state"], + "events": ["processed", "outlier", "contains_url"], + "local_media_repository": ["safe_from_quarantine"], "presence_list": ["accepted"], "presence_stream": ["currently_active"], "public_room_list_stream": ["visibility"], - "devices": ["hidden"], - "device_lists_outbound_pokes": ["sent"], - "users_who_share_rooms": ["share_private"], - "e2e_room_keys": ["is_verified"], - "account_validity": ["email_sent"], + "pushers": ["enabled"], "redactions": ["have_censored"], "room_stats_state": ["is_federatable"], - "local_media_repository": ["safe_from_quarantine"], + "rooms": ["is_public", "has_auth_chain_index"], "users": ["shadow_banned", "approved"], - "e2e_fallback_keys_json": ["used"], - "access_tokens": ["used"], - "device_lists_changes_in_room": ["converted_to_destinations"], - "pushers": ["enabled"], + "un_partial_stated_event_stream": ["rejection_status_changed"], + "users_who_share_rooms": ["share_private"], } +# These tables are never deleted from in normal operation [*], so we can resume porting +# over rows from a previous attempt rather than starting from scratch. +# +# [*]: We do delete from many of these tables when purging a room, and +# presumably when purging old events. So we might e.g. +# +# 1. Run portdb and port half of some table. +# 2. Stop portdb. +# 3. Purge something, deleting some of the rows we've ported over. +# 4. Restart portdb. The rows deleted from sqlite are still present in postgres. +# +# But this isn't the end of the world: we should be able to repeat the purge +# on the postgres DB when porting completes. 
APPEND_ONLY_TABLES = [ + "cache_invalidation_stream_by_instance", + "event_auth", + "event_edges", + "event_json", "event_reference_hashes", + "event_search", + "event_to_state_groups", "events", - "event_json", - "state_events", - "room_memberships", - "topics", - "room_names", - "rooms", + "ex_outlier_stream", "local_media_repository", "local_media_repository_thumbnails", + "presence_stream", + "public_room_list_stream", + "push_rules_stream", + "received_transactions", + "redactions", + "rejections", "remote_media_cache", "remote_media_cache_thumbnails", - "redactions", - "event_edges", - "event_auth", - "received_transactions", + "room_memberships", + "room_names", + "rooms", "sent_transactions", - "transaction_id_to_pdu", - "users", + "state_events", + "state_group_edges", "state_groups", "state_groups_state", - "event_to_state_groups", - "rejections", - "event_search", - "presence_stream", - "push_rules_stream", - "ex_outlier_stream", - "cache_invalidation_stream_by_instance", - "public_room_list_stream", - "state_group_edges", "stream_ordering_to_exterm", + "topics", + "transaction_id_to_pdu", + "un_partial_stated_event_stream", + "users", ] From 356ea4e09b706d81a7b12f4102d6be681620e283 Mon Sep 17 00:00:00 2001 From: jahway603 <64485701+jahway603@users.noreply.github.com> Date: Mon, 20 Feb 2023 09:29:13 -0500 Subject: [PATCH 216/344] Update database_maintenance_tools.md (#15083) * Update database_maintenance_tools.md Included a blog post by Jackson Chen, which DID work when I followed it to perform Matrix Synapse maintenance, versus the 2020 blog post by Victor Berger, which DID NOT work when performing maintenance. * Update database_maintenance_tools.md * Rephrasing --- changelog.d/15083.doc | 1 + docs/usage/administration/database_maintenance_tools.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15083.doc diff --git a/changelog.d/15083.doc b/changelog.d/15083.doc new file mode 100644 index 000000000000..5385f4884aa1 --- /dev/null +++ b/changelog.d/15083.doc @@ -0,0 +1 @@ +Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. diff --git a/docs/usage/administration/database_maintenance_tools.md b/docs/usage/administration/database_maintenance_tools.md index 92b805d413cb..e19380db07be 100644 --- a/docs/usage/administration/database_maintenance_tools.md +++ b/docs/usage/administration/database_maintenance_tools.md @@ -1,4 +1,4 @@ -This blog post by Victor Berger explains how to use many of the tools listed on this page: https://levans.fr/shrink-synapse-database.html +_This [blog post by Jackson Chen](https://jacksonchen666.com/posts/2022-12-03/14-33-00/) (Dec 2022) explains how to use many of the tools listed on this page. There is also an [earlier blog by Victor Berger](https://levans.fr/shrink-synapse-database.html) (June 2020), though this may be outdated in places._ # List of useful tools and scripts for maintenance Synapse database: @@ -15,4 +15,4 @@ The purge history API allows server admins to purge historic events from their d Tool for compressing (deduplicating) `state_groups_state` table. ## [SQL for analyzing Synapse PostgreSQL database stats](useful_sql_for_admins.md) -Some easy SQL that reports useful stats about your Synapse database. \ No newline at end of file +Some easy SQL that reports useful stats about your Synapse database.
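To make the reasoning behind `BOOLEAN_COLUMNS` in the `synapse_port_db` patch above (#15108) concrete, here is a minimal, hypothetical Python sketch of the per-table coercion a port script has to perform: SQLite hands boolean-ish values back as 0/1 integers, and Postgres rejects inserting those integers into BOOLEAN columns, so the listed columns must be converted explicitly before insertion. The helper name, sample table and sample row below are invented for illustration and are not the actual portdb code.

    from typing import Any, Dict, List, Tuple

    # Same shape as portdb's BOOLEAN_COLUMNS; a single illustrative table here.
    BOOLEAN_COLUMNS: Dict[str, List[str]] = {
        "events": ["processed", "outlier", "contains_url"],
    }

    def coerce_boolean_columns(
        table: str, headers: List[str], rows: List[Tuple[Any, ...]]
    ) -> List[Tuple[Any, ...]]:
        """Turn SQLite's 0/1 integers into real booleans for the configured columns."""
        indexes = {headers.index(col) for col in BOOLEAN_COLUMNS.get(table, [])}
        return [
            tuple(
                bool(value) if i in indexes and value is not None else value
                for i, value in enumerate(row)
            )
            for row in rows
        ]

    # A row as read from SQLite: the "boolean" columns come back as integers...
    headers = ["event_id", "processed", "outlier", "contains_url"]
    sqlite_row = ("$abc123", 1, 0, 1)

    # ...and after coercion it can be inserted into Postgres BOOLEAN columns.
    print(coerce_boolean_columns("events", headers, [sqlite_row]))
    # [('$abc123', True, False, True)]

The `None` check matters because nullable columns should stay NULL rather than being turned into False.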
From bb374f43201d579b4a5afb5a8bbc99b5699a12ee Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 20 Feb 2023 17:33:24 +0100 Subject: [PATCH 217/344] Add `matrix-org-archive-keyring` package as `Recommends` (#15110) This is so installations will pull in the keyring package, allowing us to update the expiry time of the `packages.matrix.org` repository. --- debian/changelog | 6 ++++++ debian/control | 1 + 2 files changed, 7 insertions(+) diff --git a/debian/changelog b/debian/changelog index ea651438f1dc..71e8bc0d6c5e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.77.0ubuntu1) UNRELEASED; urgency=medium + + * Add `matrix-org-archive-keyring` package as recommended. + + -- Synapse Packaging team Mon, 20 Feb 2023 15:41:41 +0000 + matrix-synapse-py3 (1.77.0) stable; urgency=medium * New Synapse release 1.77.0. diff --git a/debian/control b/debian/control index bc628cec085e..2ff55db5de62 100644 --- a/debian/control +++ b/debian/control @@ -37,6 +37,7 @@ Depends: # so we put perl:Depends in Suggests rather than Depends. Recommends: ${shlibs1:Recommends}, + matrix-org-archive-keyring, Suggests: sqlite3, ${perl:Depends}, From 8cede528a821943f60978b17c86516fb16fe2f00 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 21 Feb 2023 11:03:02 +0000 Subject: [PATCH 218/344] Upper-bound frozendict dependency (#15114) * Upper-bound frozendict dependency This is an ugly kludge to solve https://github.com/matrix-org/synapse/issues/15109. It is not the most friendly thing to do for downstream packagers (apologies), but we are a) running low on time at the moment, and b) seeking to remove frozendict anyway. * Changelog --- changelog.d/15114.misc | 1 + poetry.lock | 2 +- pyproject.toml | 4 +++- 3 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15114.misc diff --git a/changelog.d/15114.misc b/changelog.d/15114.misc new file mode 100644 index 000000000000..8e08d632e066 --- /dev/null +++ b/changelog.d/15114.misc @@ -0,0 +1 @@ +Upper-bound frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. diff --git a/poetry.lock b/poetry.lock index ffb230139c46..4d724ab782a5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3030,4 +3030,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "95cb043fa56e1e3275ba7f74b68b2191bd5886eea3e06b8cd370d7fc9fea3c07" +content-hash = "e12077711e5ff83f3c6038ea44c37bd49773799ec8245035b01094b7800c5c92" diff --git a/pyproject.toml b/pyproject.toml index 7f74b552c194..1444642c89d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -154,7 +154,9 @@ python = "^3.7.1" # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0 jsonschema = ">=3.0.0" # frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41 -frozendict = ">=1,!=2.1.2" +# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this +# because frozendict has been more trouble than it's worth; we would like to move to immutabledict. +frozendict = ">=1,!=2.1.2,<2.3.5" # We require 2.1.0 or higher for type hints. 
Previous guard was >= 1.1.0 unpaddedbase64 = ">=2.1.0" # We require 1.5.0 to work around an issue when running against the C implementation of From addd12f16dc35a4f82cb48807719909e7aed9dcb Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 21 Feb 2023 12:26:00 +0000 Subject: [PATCH 219/344] Tweak logging for when a worker waits for its view of a replication stream to catch up. (#15120)Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> * Improve logging messages for the 'wait for repl stream' read-after-write consistency feature * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Update synapse/replication/tcp/client.py Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --------- Signed-off-by: Olivier Wilkinson (reivilibre) Co-authored-by: Sean Quah <8349537+squahtx@users.noreply.github.com> --- changelog.d/15120.misc | 1 + synapse/replication/tcp/client.py | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15120.misc diff --git a/changelog.d/15120.misc b/changelog.d/15120.misc new file mode 100644 index 000000000000..ebbc0c90276d --- /dev/null +++ b/changelog.d/15120.misc @@ -0,0 +1 @@ +Tweak logging for when a worker waits for its view of a replication stream to catch up. \ No newline at end of file diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index cc0528bd8e5f..424854efbe52 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -370,15 +370,23 @@ async def wait_for_stream_position( # We measure here to get in flight counts and average waiting time. with Measure(self._clock, "repl.wait_for_stream_position"): logger.info( - "Waiting for repl stream %r to reach %s (%s)", + "Waiting for repl stream %r to reach %s (%s); currently at: %s", stream_name, position, instance_name, + current_position, ) try: await make_deferred_yieldable(deferred) except defer.TimeoutError: - logger.error("Timed out waiting for stream %s", stream_name) + logger.error( + "Timed out waiting for repl stream %r to reach %s (%s)" + "; currently at: %s", + stream_name, + position, + instance_name, + self._streams[stream_name].current_token(instance_name), + ) return logger.info( From a3d471e92938ad4cd2a7fc49e651791ba4d328d3 Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 21 Feb 2023 14:37:44 +0000 Subject: [PATCH 220/344] 1.78.0rc1 --- CHANGES.md | 91 ++++++++++++++++++++++++++++++++++++++- changelog.d/13755.misc | 1 - changelog.d/13779.bugfix | 1 - changelog.d/14605.bugfix | 1 - changelog.d/14606.misc | 1 - changelog.d/14675.misc | 1 - changelog.d/14742.misc | 1 - changelog.d/14834.misc | 1 - changelog.d/14840.misc | 1 - changelog.d/14892.doc | 1 - changelog.d/14929.misc | 1 - changelog.d/14959.doc | 1 - changelog.d/14964.feature | 1 - changelog.d/14969.feature | 1 - changelog.d/14973.misc | 1 - changelog.d/14977.misc | 1 - changelog.d/14980.misc | 1 - changelog.d/14982.misc | 1 - changelog.d/15004.feature | 1 - changelog.d/15008.misc | 1 - changelog.d/15020.misc | 1 - changelog.d/15022.doc | 1 - changelog.d/15026.misc | 1 - changelog.d/15027.misc | 1 - changelog.d/15028.misc | 1 - changelog.d/15031.misc | 1 - changelog.d/15033.misc | 1 - changelog.d/15034.feature | 1 - changelog.d/15035.misc | 1 - changelog.d/15037.misc | 1 - changelog.d/15038.bugfix | 1 - changelog.d/15040.misc | 1 - changelog.d/15041.misc | 1 - changelog.d/15042.feature | 1 - changelog.d/15043.misc | 1 - changelog.d/15045.feature | 1 - changelog.d/15047.misc 
| 1 - changelog.d/15052.misc | 1 - changelog.d/15053.misc | 1 - changelog.d/15054.misc | 1 - changelog.d/15059.misc | 1 - changelog.d/15060.misc | 1 - changelog.d/15061.misc | 1 - changelog.d/15062.misc | 1 - changelog.d/15063.misc | 1 - changelog.d/15064.misc | 1 - changelog.d/15065.misc | 1 - changelog.d/15068.bugfix | 1 - changelog.d/15069.misc | 1 - changelog.d/15070.misc | 1 - changelog.d/15072.misc | 1 - changelog.d/15073.feature | 1 - changelog.d/15074.bugfix | 1 - changelog.d/15075.feature | 2 - changelog.d/15078.doc | 1 - changelog.d/15079.bugfix | 1 - changelog.d/15080.bugfix | 1 - changelog.d/15083.doc | 1 - changelog.d/15084.misc | 1 - changelog.d/15096.bugfix | 1 - changelog.d/15099.misc | 1 - changelog.d/15100.misc | 1 - changelog.d/15101.misc | 1 - changelog.d/15102.misc | 1 - changelog.d/15104.misc | 1 - changelog.d/15105.misc | 1 - changelog.d/15108.bugfix | 1 - changelog.d/15114.misc | 1 - changelog.d/15120.misc | 1 - debian/changelog | 5 ++- pyproject.toml | 2 +- 71 files changed, 94 insertions(+), 73 deletions(-) delete mode 100644 changelog.d/13755.misc delete mode 100644 changelog.d/13779.bugfix delete mode 100644 changelog.d/14605.bugfix delete mode 100644 changelog.d/14606.misc delete mode 100644 changelog.d/14675.misc delete mode 100644 changelog.d/14742.misc delete mode 100644 changelog.d/14834.misc delete mode 100644 changelog.d/14840.misc delete mode 100644 changelog.d/14892.doc delete mode 100644 changelog.d/14929.misc delete mode 100644 changelog.d/14959.doc delete mode 100644 changelog.d/14964.feature delete mode 100644 changelog.d/14969.feature delete mode 100644 changelog.d/14973.misc delete mode 100644 changelog.d/14977.misc delete mode 100644 changelog.d/14980.misc delete mode 100644 changelog.d/14982.misc delete mode 100644 changelog.d/15004.feature delete mode 100644 changelog.d/15008.misc delete mode 100644 changelog.d/15020.misc delete mode 100644 changelog.d/15022.doc delete mode 100644 changelog.d/15026.misc delete mode 100644 changelog.d/15027.misc delete mode 100644 changelog.d/15028.misc delete mode 100644 changelog.d/15031.misc delete mode 100644 changelog.d/15033.misc delete mode 100644 changelog.d/15034.feature delete mode 100644 changelog.d/15035.misc delete mode 100644 changelog.d/15037.misc delete mode 100644 changelog.d/15038.bugfix delete mode 100644 changelog.d/15040.misc delete mode 100644 changelog.d/15041.misc delete mode 100644 changelog.d/15042.feature delete mode 100644 changelog.d/15043.misc delete mode 100644 changelog.d/15045.feature delete mode 100644 changelog.d/15047.misc delete mode 100644 changelog.d/15052.misc delete mode 100644 changelog.d/15053.misc delete mode 100644 changelog.d/15054.misc delete mode 100644 changelog.d/15059.misc delete mode 100644 changelog.d/15060.misc delete mode 100644 changelog.d/15061.misc delete mode 100644 changelog.d/15062.misc delete mode 100644 changelog.d/15063.misc delete mode 100644 changelog.d/15064.misc delete mode 100644 changelog.d/15065.misc delete mode 100644 changelog.d/15068.bugfix delete mode 100644 changelog.d/15069.misc delete mode 100644 changelog.d/15070.misc delete mode 100644 changelog.d/15072.misc delete mode 100644 changelog.d/15073.feature delete mode 100644 changelog.d/15074.bugfix delete mode 100644 changelog.d/15075.feature delete mode 100644 changelog.d/15078.doc delete mode 100644 changelog.d/15079.bugfix delete mode 100644 changelog.d/15080.bugfix delete mode 100644 changelog.d/15083.doc delete mode 100644 changelog.d/15084.misc delete mode 100644 
changelog.d/15096.bugfix delete mode 100644 changelog.d/15099.misc delete mode 100644 changelog.d/15100.misc delete mode 100644 changelog.d/15101.misc delete mode 100644 changelog.d/15102.misc delete mode 100644 changelog.d/15104.misc delete mode 100644 changelog.d/15105.misc delete mode 100644 changelog.d/15108.bugfix delete mode 100644 changelog.d/15114.misc delete mode 100644 changelog.d/15120.misc diff --git a/CHANGES.md b/CHANGES.md index a62bd4eb28a3..01b81fe17454 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,92 @@ +Synapse 1.78.0rc1 (2023-02-21) +============================== + +Features +-------- + +- Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). ([\#14964](https://github.com/matrix-org/synapse/issues/14964)) +- Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#14969](https://github.com/matrix-org/synapse/issues/14969)) +- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them. ([\#15004](https://github.com/matrix-org/synapse/issues/15004)) +- Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034)) +- Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042)) +- Experimental support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): the `exact_event_property_contains` push rule condition. ([\#15045](https://github.com/matrix-org/synapse/issues/15045)) +- Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073)) +- Update the error code returned when user sends a duplicate annotation. ([\#15075](https://github.com/matrix-org/synapse/issues/15075)) + + +Bugfixes +-------- + +- Prevent clients from reporting nonexistent events. ([\#13779](https://github.com/matrix-org/synapse/issues/13779)) +- Return spec-compliant JSON errors when unknown endpoints are requested. ([\#14605](https://github.com/matrix-org/synapse/issues/14605)) +- Fix a long-standing bug where the room aliases returned could be corrupted. ([\#15038](https://github.com/matrix-org/synapse/issues/15038)) +- Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). ([\#15068](https://github.com/matrix-org/synapse/issues/15068)) +- Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. ([\#15074](https://github.com/matrix-org/synapse/issues/15074)) +- Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. ([\#15079](https://github.com/matrix-org/synapse/issues/15079)) +- Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. ([\#15080](https://github.com/matrix-org/synapse/issues/15080)) +- Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. 
([\#15096](https://github.com/matrix-org/synapse/issues/15096)) +- Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. ([\#15108](https://github.com/matrix-org/synapse/issues/15108)) + + +Improved Documentation +---------------------- + +- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892)) +- Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959)) +- Document how to start Synapse in the contributing guide. ([\#15022](https://github.com/matrix-org/synapse/issues/15022)) +- Fix a mistake in registration_shared_secret_path docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078)) +- Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083)) + + +Internal Changes +---------------- + +- Re-type hint some collections as read-only. ([\#13755](https://github.com/matrix-org/synapse/issues/13755)) +- Faster joins: don't stall when another user joins during a partial-state room resync. ([\#14606](https://github.com/matrix-org/synapse/issues/14606)) +- Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675)) +- Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742)) +- Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834)) +- Prevent 'WARNING: there is already a transaction in progress' lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840)) +- Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14929](https://github.com/matrix-org/synapse/issues/14929)) +- Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973)) +- Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977)) +- Skip calculating unread push actions in /sync when enable_push is false. ([\#14980](https://github.com/matrix-org/synapse/issues/14980)) +- Add a schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. ([\#14982](https://github.com/matrix-org/synapse/issues/14982)) +- Improve type hints. 
([\#15008](https://github.com/matrix-org/synapse/issues/15008), [\#15026](https://github.com/matrix-org/synapse/issues/15026), [\#15027](https://github.com/matrix-org/synapse/issues/15027), [\#15028](https://github.com/matrix-org/synapse/issues/15028), [\#15031](https://github.com/matrix-org/synapse/issues/15031), [\#15035](https://github.com/matrix-org/synapse/issues/15035), [\#15052](https://github.com/matrix-org/synapse/issues/15052), [\#15072](https://github.com/matrix-org/synapse/issues/15072), [\#15084](https://github.com/matrix-org/synapse/issues/15084)) +- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15037](https://github.com/matrix-org/synapse/issues/15037)) +- Avoid mutating a cached value in `get_user_devices_from_cache`. ([\#15040](https://github.com/matrix-org/synapse/issues/15040)) +- Fix a rare exception in logs on start up. ([\#15041](https://github.com/matrix-org/synapse/issues/15041)) +- Update pyo3-log to v0.8.1. ([\#15043](https://github.com/matrix-org/synapse/issues/15043)) +- Avoid mutating cached values in `_generate_sync_entry_for_account_data`. ([\#15047](https://github.com/matrix-org/synapse/issues/15047)) +- Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. ([\#15053](https://github.com/matrix-org/synapse/issues/15053)) +- Merge debug logging from the hotfixes branch. ([\#15054](https://github.com/matrix-org/synapse/issues/15054)) +- Faster joins: omit device list updates originating from partial state rooms in /sync responses without lazy loading of members enabled. ([\#15069](https://github.com/matrix-org/synapse/issues/15069)) +- Fix clashing database transaction name. ([\#15070](https://github.com/matrix-org/synapse/issues/15070)) +- Upper-bound frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. ([\#15114](https://github.com/matrix-org/synapse/issues/15114)) +- Tweak logging for when a worker waits for its view of a replication stream to catch up. ([\#15120](https://github.com/matrix-org/synapse/issues/15120)) + +
Locked dependency updates + +- Bump bleach from 5.0.1 to 6.0.0. ([\#15059](https://github.com/matrix-org/synapse/issues/15059)) +- Bump cryptography from 38.0.4 to 39.0.1. ([\#15020](https://github.com/matrix-org/synapse/issues/15020)) +- Bump ruff version from 0.0.230 to 0.0.237. ([\#15033](https://github.com/matrix-org/synapse/issues/15033)) +- Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. ([\#15060](https://github.com/matrix-org/synapse/issues/15060)) +- Bump systemd-python from 234 to 235. ([\#15061](https://github.com/matrix-org/synapse/issues/15061)) +- Bump serde_json from 1.0.92 to 1.0.93. ([\#15062](https://github.com/matrix-org/synapse/issues/15062)) +- Bump types-requests from 2.28.11.8 to 2.28.11.12. ([\#15063](https://github.com/matrix-org/synapse/issues/15063)) +- Bump types-pillow from 9.4.0.5 to 9.4.0.10. ([\#15064](https://github.com/matrix-org/synapse/issues/15064)) +- Bump sentry-sdk from 1.13.0 to 1.15.0. ([\#15065](https://github.com/matrix-org/synapse/issues/15065)) +- Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. ([\#15099](https://github.com/matrix-org/synapse/issues/15099)) +- Bump types-bleach from 5.0.3.1 to 6.0.0.0. ([\#15100](https://github.com/matrix-org/synapse/issues/15100)) +- Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. ([\#15101](https://github.com/matrix-org/synapse/issues/15101)) +- Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. ([\#15102](https://github.com/matrix-org/synapse/issues/15102)) +- Bump types-pillow from 9.4.0.10 to 9.4.0.13. ([\#15104](https://github.com/matrix-org/synapse/issues/15104)) +- Bump types-setuptools from 67.1.0.0 to 67.3.0.1. ([\#15105](https://github.com/matrix-org/synapse/issues/15105)) + + +
+ + Synapse 1.77.0 (2023-02-14) =========================== @@ -63,7 +152,7 @@ Internal Changes - Preparatory work for adding a denormalised event stream ordering column in the future. Contributed by Nick @ Beeper (@fizzadar). ([\#14979](https://github.com/matrix-org/synapse/issues/14979), [9cd7610](https://github.com/matrix-org/synapse/commit/9cd7610f86ab5051c9365dd38d1eec405a5f8ca6), [f10caa7](https://github.com/matrix-org/synapse/commit/f10caa73eee0caa91cf373966104d1ededae2aee); see [\#15014](https://github.com/matrix-org/synapse/issues/15014)) - Add tests for `_flatten_dict`. ([\#14981](https://github.com/matrix-org/synapse/issues/14981), [\#15002](https://github.com/matrix-org/synapse/issues/15002)) -
Dependabot updates +
Locked dependency updates - Bump dtolnay/rust-toolchain from e645b0cf01249a964ec099494d38d2da0f0b349f to 9cd00a88a73addc8617065438eff914dd08d0955. ([\#14968](https://github.com/matrix-org/synapse/issues/14968)) - Bump docker/build-push-action from 3 to 4. ([\#14952](https://github.com/matrix-org/synapse/issues/14952)) diff --git a/changelog.d/13755.misc b/changelog.d/13755.misc deleted file mode 100644 index 662ee00e99d5..000000000000 --- a/changelog.d/13755.misc +++ /dev/null @@ -1 +0,0 @@ -Re-type hint some collections as read-only. diff --git a/changelog.d/13779.bugfix b/changelog.d/13779.bugfix deleted file mode 100644 index a92c722c6ea6..000000000000 --- a/changelog.d/13779.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent clients from reporting nonexistent events. \ No newline at end of file diff --git a/changelog.d/14605.bugfix b/changelog.d/14605.bugfix deleted file mode 100644 index cb95a87d9219..000000000000 --- a/changelog.d/14605.bugfix +++ /dev/null @@ -1 +0,0 @@ -Return spec-compliant JSON errors when unknown endpoints are requested. diff --git a/changelog.d/14606.misc b/changelog.d/14606.misc deleted file mode 100644 index e2debc96d8d4..000000000000 --- a/changelog.d/14606.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: don't stall when another user joins during a fast join resync. diff --git a/changelog.d/14675.misc b/changelog.d/14675.misc deleted file mode 100644 index bc1ac1c82aeb..000000000000 --- a/changelog.d/14675.misc +++ /dev/null @@ -1 +0,0 @@ -Add a class UnpersistedEventContext to allow for the batching up of storing state groups. diff --git a/changelog.d/14742.misc b/changelog.d/14742.misc deleted file mode 100644 index c0b5d2c0620e..000000000000 --- a/changelog.d/14742.misc +++ /dev/null @@ -1 +0,0 @@ -Add check to ensure locked dependencies have source distributions available. \ No newline at end of file diff --git a/changelog.d/14834.misc b/changelog.d/14834.misc deleted file mode 100644 index e683212dc463..000000000000 --- a/changelog.d/14834.misc +++ /dev/null @@ -1 +0,0 @@ -Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. \ No newline at end of file diff --git a/changelog.d/14840.misc b/changelog.d/14840.misc deleted file mode 100644 index ff6084284a5e..000000000000 --- a/changelog.d/14840.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent "WARNING: there is already a transaction in progress" lines appearing in PostgreSQL's logs on some occasions. \ No newline at end of file diff --git a/changelog.d/14892.doc b/changelog.d/14892.doc deleted file mode 100644 index 2bc3ad06c6b6..000000000000 --- a/changelog.d/14892.doc +++ /dev/null @@ -1 +0,0 @@ -Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. diff --git a/changelog.d/14929.misc b/changelog.d/14929.misc deleted file mode 100644 index 2cc3614dfded..000000000000 --- a/changelog.d/14929.misc +++ /dev/null @@ -1 +0,0 @@ -Use `StrCollection` to avoid potential bugs with `Collection[str]`. diff --git a/changelog.d/14959.doc b/changelog.d/14959.doc deleted file mode 100644 index 45edf1a76520..000000000000 --- a/changelog.d/14959.doc +++ /dev/null @@ -1 +0,0 @@ -Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. 
diff --git a/changelog.d/14964.feature b/changelog.d/14964.feature deleted file mode 100644 index 13c0bc193b8e..000000000000 --- a/changelog.d/14964.feature +++ /dev/null @@ -1 +0,0 @@ -Implement the experimental `exact_event_match` push rule condition from [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758). diff --git a/changelog.d/14969.feature b/changelog.d/14969.feature deleted file mode 100644 index a4680ef9c89b..000000000000 --- a/changelog.d/14969.feature +++ /dev/null @@ -1 +0,0 @@ -Add account data to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.78/usage/administration/admin_faq.html#how-can-i-export-user-data). \ No newline at end of file diff --git a/changelog.d/14973.misc b/changelog.d/14973.misc deleted file mode 100644 index 365762360285..000000000000 --- a/changelog.d/14973.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of `/sync` in a few situations. diff --git a/changelog.d/14977.misc b/changelog.d/14977.misc deleted file mode 100644 index 4d551c52b7db..000000000000 --- a/changelog.d/14977.misc +++ /dev/null @@ -1 +0,0 @@ -Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. \ No newline at end of file diff --git a/changelog.d/14980.misc b/changelog.d/14980.misc deleted file mode 100644 index 145f4a788b8e..000000000000 --- a/changelog.d/14980.misc +++ /dev/null @@ -1 +0,0 @@ -Skip calculating unread push actions in /sync when enable_push is false. diff --git a/changelog.d/14982.misc b/changelog.d/14982.misc deleted file mode 100644 index 9aaa7ce264ed..000000000000 --- a/changelog.d/14982.misc +++ /dev/null @@ -1 +0,0 @@ -Add a schema dump symlinks inside `contrib`, to make it easier for IDEs to interrogate Synapse's database schema. diff --git a/changelog.d/15004.feature b/changelog.d/15004.feature deleted file mode 100644 index d11d0aca91da..000000000000 --- a/changelog.d/15004.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to unambiguate push rule keys with dots in them. diff --git a/changelog.d/15008.misc b/changelog.d/15008.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15008.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15020.misc b/changelog.d/15020.misc deleted file mode 100644 index c5290283f0b5..000000000000 --- a/changelog.d/15020.misc +++ /dev/null @@ -1 +0,0 @@ -Bump cryptography from 38.0.4 to 39.0.1. diff --git a/changelog.d/15022.doc b/changelog.d/15022.doc deleted file mode 100644 index e1627c20cbc9..000000000000 --- a/changelog.d/15022.doc +++ /dev/null @@ -1 +0,0 @@ -Document how to start Synapse in the contributing guide. diff --git a/changelog.d/15026.misc b/changelog.d/15026.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15026.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15027.misc b/changelog.d/15027.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15027.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15028.misc b/changelog.d/15028.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15028.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. 
diff --git a/changelog.d/15031.misc b/changelog.d/15031.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15031.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15033.misc b/changelog.d/15033.misc deleted file mode 100644 index 83dc3a75b6b6..000000000000 --- a/changelog.d/15033.misc +++ /dev/null @@ -1 +0,0 @@ -Bump ruff version from 0.0.230 to 0.0.237. diff --git a/changelog.d/15034.feature b/changelog.d/15034.feature deleted file mode 100644 index 34f320da9200..000000000000 --- a/changelog.d/15034.feature +++ /dev/null @@ -1 +0,0 @@ -Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. diff --git a/changelog.d/15035.misc b/changelog.d/15035.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15035.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15037.misc b/changelog.d/15037.misc deleted file mode 100644 index fabfe77d35af..000000000000 --- a/changelog.d/15037.misc +++ /dev/null @@ -1 +0,0 @@ -Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. diff --git a/changelog.d/15038.bugfix b/changelog.d/15038.bugfix deleted file mode 100644 index 4695a097561c..000000000000 --- a/changelog.d/15038.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the room aliases returned could be corrupted. diff --git a/changelog.d/15040.misc b/changelog.d/15040.misc deleted file mode 100644 index ca129b64afc5..000000000000 --- a/changelog.d/15040.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid mutating a cached value in `get_user_devices_from_cache`. diff --git a/changelog.d/15041.misc b/changelog.d/15041.misc deleted file mode 100644 index d602b0043a85..000000000000 --- a/changelog.d/15041.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a rare exception in logs on start up. diff --git a/changelog.d/15042.feature b/changelog.d/15042.feature deleted file mode 100644 index 7a4de89f0070..000000000000 --- a/changelog.d/15042.feature +++ /dev/null @@ -1 +0,0 @@ -Tag opentracing spans for federation requests with the name of the worker serving the request. diff --git a/changelog.d/15043.misc b/changelog.d/15043.misc deleted file mode 100644 index cb1839412317..000000000000 --- a/changelog.d/15043.misc +++ /dev/null @@ -1 +0,0 @@ -Update pyo3-log to v0.8.1. diff --git a/changelog.d/15045.feature b/changelog.d/15045.feature deleted file mode 100644 index 87766befdab8..000000000000 --- a/changelog.d/15045.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): the `exact_event_property_contains` push rule condition. diff --git a/changelog.d/15047.misc b/changelog.d/15047.misc deleted file mode 100644 index 561dc874de0e..000000000000 --- a/changelog.d/15047.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid mutating cached values in `_generate_sync_entry_for_account_data`. diff --git a/changelog.d/15052.misc b/changelog.d/15052.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15052.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15053.misc b/changelog.d/15053.misc deleted file mode 100644 index c27528f5c6e5..000000000000 --- a/changelog.d/15053.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor arguments of `try_unbind_threepid` and `_try_unbind_threepid_with_id_server` to not use dictionaries. 
\ No newline at end of file diff --git a/changelog.d/15054.misc b/changelog.d/15054.misc deleted file mode 100644 index d800b107cf14..000000000000 --- a/changelog.d/15054.misc +++ /dev/null @@ -1 +0,0 @@ -Merge debug logging from the hotfixes branch. diff --git a/changelog.d/15059.misc b/changelog.d/15059.misc deleted file mode 100644 index e962b208fdf5..000000000000 --- a/changelog.d/15059.misc +++ /dev/null @@ -1 +0,0 @@ -Bump bleach from 5.0.1 to 6.0.0. diff --git a/changelog.d/15060.misc b/changelog.d/15060.misc deleted file mode 100644 index 5b99e0600359..000000000000 --- a/changelog.d/15060.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dtolnay/rust-toolchain from 9cd00a88a73addc8617065438eff914dd08d0955 to 25dc93b901a87e864900a8aec6c12e9aa794c0c3. diff --git a/changelog.d/15061.misc b/changelog.d/15061.misc deleted file mode 100644 index 40017827a266..000000000000 --- a/changelog.d/15061.misc +++ /dev/null @@ -1 +0,0 @@ -Bump systemd-python from 234 to 235. diff --git a/changelog.d/15062.misc b/changelog.d/15062.misc deleted file mode 100644 index adc194063026..000000000000 --- a/changelog.d/15062.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde_json from 1.0.92 to 1.0.93. diff --git a/changelog.d/15063.misc b/changelog.d/15063.misc deleted file mode 100644 index b52e1faed0f4..000000000000 --- a/changelog.d/15063.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.28.11.8 to 2.28.11.12. diff --git a/changelog.d/15064.misc b/changelog.d/15064.misc deleted file mode 100644 index 644d4bb2304f..000000000000 --- a/changelog.d/15064.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.4.0.5 to 9.4.0.10. diff --git a/changelog.d/15065.misc b/changelog.d/15065.misc deleted file mode 100644 index df2f9a773ee7..000000000000 --- a/changelog.d/15065.misc +++ /dev/null @@ -1 +0,0 @@ -Bump sentry-sdk from 1.13.0 to 1.15.0. diff --git a/changelog.d/15068.bugfix b/changelog.d/15068.bugfix deleted file mode 100644 index f09ffa2877bd..000000000000 --- a/changelog.d/15068.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.76.0 where partially-joined rooms could not be deleted using the [purge room API](https://matrix-org.github.io/synapse/latest/admin_api/rooms.html#delete-room-api). diff --git a/changelog.d/15069.misc b/changelog.d/15069.misc deleted file mode 100644 index e7a619ad2b5b..000000000000 --- a/changelog.d/15069.misc +++ /dev/null @@ -1 +0,0 @@ -Faster joins: omit device list updates originating from partial state rooms in /sync responses without lazy loading of members enabled. diff --git a/changelog.d/15070.misc b/changelog.d/15070.misc deleted file mode 100644 index 0f3244de9fa3..000000000000 --- a/changelog.d/15070.misc +++ /dev/null @@ -1 +0,0 @@ -Fix clashing database transaction name. diff --git a/changelog.d/15072.misc b/changelog.d/15072.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15072.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15073.feature b/changelog.d/15073.feature deleted file mode 100644 index 2889e3444f5d..000000000000 --- a/changelog.d/15073.feature +++ /dev/null @@ -1 +0,0 @@ -Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. diff --git a/changelog.d/15074.bugfix b/changelog.d/15074.bugfix deleted file mode 100644 index d1ceb4f4c89b..000000000000 --- a/changelog.d/15074.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where federated joins would fail if the first server in the list of servers to try is not in the room. 
diff --git a/changelog.d/15075.feature b/changelog.d/15075.feature deleted file mode 100644 index d25a7567a4fa..000000000000 --- a/changelog.d/15075.feature +++ /dev/null @@ -1,2 +0,0 @@ -Update the error code returned when user sends a duplicate annotation. - diff --git a/changelog.d/15078.doc b/changelog.d/15078.doc deleted file mode 100644 index 641f9a993b47..000000000000 --- a/changelog.d/15078.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a mistake in registration_shared_secret_path docs. \ No newline at end of file diff --git a/changelog.d/15079.bugfix b/changelog.d/15079.bugfix deleted file mode 100644 index 907892c1ef5f..000000000000 --- a/changelog.d/15079.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.74.0 where searching with colons when using ICU for search term tokenisation would fail with an error. \ No newline at end of file diff --git a/changelog.d/15080.bugfix b/changelog.d/15080.bugfix deleted file mode 100644 index 965d0b921e0a..000000000000 --- a/changelog.d/15080.bugfix +++ /dev/null @@ -1 +0,0 @@ -Reduce the likelihood of a rare race condition where rejoining a restricted room over federation would fail. diff --git a/changelog.d/15083.doc b/changelog.d/15083.doc deleted file mode 100644 index 5385f4884aa1..000000000000 --- a/changelog.d/15083.doc +++ /dev/null @@ -1 +0,0 @@ -Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. diff --git a/changelog.d/15084.misc b/changelog.d/15084.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15084.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15096.bugfix b/changelog.d/15096.bugfix deleted file mode 100644 index 09b4d861f8a5..000000000000 --- a/changelog.d/15096.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.76 where workers would fail to start if the `health` listener was configured. diff --git a/changelog.d/15099.misc b/changelog.d/15099.misc deleted file mode 100644 index 53ed621ccf0e..000000000000 --- a/changelog.d/15099.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-jsonschema from 4.17.0.3 to 4.17.0.5. diff --git a/changelog.d/15100.misc b/changelog.d/15100.misc deleted file mode 100644 index 035d09454916..000000000000 --- a/changelog.d/15100.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-bleach from 5.0.3.1 to 6.0.0.0. diff --git a/changelog.d/15101.misc b/changelog.d/15101.misc deleted file mode 100644 index 807dbb554037..000000000000 --- a/changelog.d/15101.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dtolnay/rust-toolchain from 25dc93b901a87e864900a8aec6c12e9aa794c0c3 to e12eda571dc9a5ee5d58eecf4738ec291c66f295. diff --git a/changelog.d/15102.misc b/changelog.d/15102.misc deleted file mode 100644 index bb6037d52e1b..000000000000 --- a/changelog.d/15102.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dawidd6/action-download-artifact from 2.24.3 to 2.25.0. diff --git a/changelog.d/15104.misc b/changelog.d/15104.misc deleted file mode 100644 index e9fa8759263e..000000000000 --- a/changelog.d/15104.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.4.0.10 to 9.4.0.13. diff --git a/changelog.d/15105.misc b/changelog.d/15105.misc deleted file mode 100644 index 5bce811d32f6..000000000000 --- a/changelog.d/15105.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 67.1.0.0 to 67.3.0.1. 
diff --git a/changelog.d/15108.bugfix b/changelog.d/15108.bugfix deleted file mode 100644 index 30af8b439d1b..000000000000 --- a/changelog.d/15108.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.75 where the [portdb script](https://matrix-org.github.io/synapse/release-v1.78/postgres.html#porting-from-sqlite) would fail to run after a room had been faster-joined. diff --git a/changelog.d/15114.misc b/changelog.d/15114.misc deleted file mode 100644 index 8e08d632e066..000000000000 --- a/changelog.d/15114.misc +++ /dev/null @@ -1 +0,0 @@ -Upper-bound frozendict dependency. This works around us being unable to test installing our wheels against Python 3.11 in CI. diff --git a/changelog.d/15120.misc b/changelog.d/15120.misc deleted file mode 100644 index ebbc0c90276d..000000000000 --- a/changelog.d/15120.misc +++ /dev/null @@ -1 +0,0 @@ -Tweak logging for when a worker waits for its view of a replication stream to catch up. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 71e8bc0d6c5e..f9e95ee5e2e9 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,9 @@ -matrix-synapse-py3 (1.77.0ubuntu1) UNRELEASED; urgency=medium +matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium * Add `matrix-org-archive-keyring` package as recommended. + * New Synapse release 1.78.0rc1. - -- Synapse Packaging team Mon, 20 Feb 2023 15:41:41 +0000 + -- Synapse Packaging team Tue, 21 Feb 2023 14:29:19 +0000 matrix-synapse-py3 (1.77.0) stable; urgency=medium diff --git a/pyproject.toml b/pyproject.toml index 1444642c89d1..cef7d295c131 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.77.0" +version = "1.78.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 8219525b66a2ffa9a9f1ed6e5c716fcc1146469e Mon Sep 17 00:00:00 2001 From: "Olivier Wilkinson (reivilibre)" Date: Tue, 21 Feb 2023 16:17:37 +0000 Subject: [PATCH 221/344] Tweak changelog --- CHANGES.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 01b81fe17454..f5c19bcb9763 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -9,7 +9,7 @@ Features - Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to disambiguate push rule keys with dots in them. ([\#15004](https://github.com/matrix-org/synapse/issues/15004)) - Allow Synapse to use a specific Redis [logical database](https://redis.io/commands/select/) in worker-mode deployments. ([\#15034](https://github.com/matrix-org/synapse/issues/15034)) - Tag opentracing spans for federation requests with the name of the worker serving the request. ([\#15042](https://github.com/matrix-org/synapse/issues/15042)) -- Experimental support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): the `exact_event_property_contains` push rule condition. ([\#15045](https://github.com/matrix-org/synapse/issues/15045)) +- Implement the experimental `exact_event_property_contains` push rule condition from [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966). ([\#15045](https://github.com/matrix-org/synapse/issues/15045)) - Remove spurious `dont_notify` action from the defaults for the `.m.rule.reaction` pushrule. ([\#15073](https://github.com/matrix-org/synapse/issues/15073)) - Update the error code returned when user sends a duplicate annotation. 
([\#15075](https://github.com/matrix-org/synapse/issues/15075)) @@ -31,9 +31,8 @@ Bugfixes Improved Documentation ---------------------- -- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892)) +- Document how to start Synapse with Poetry. Contributed by @thezaidbintariq. ([\#14892](https://github.com/matrix-org/synapse/issues/14892), [\#15022](https://github.com/matrix-org/synapse/issues/15022)) - Update delegation documentation to clarify that SRV DNS delegation does not eliminate all needs to serve files from .well-known locations. Contributed by @williamkray. ([\#14959](https://github.com/matrix-org/synapse/issues/14959)) -- Document how to start Synapse in the contributing guide. ([\#15022](https://github.com/matrix-org/synapse/issues/15022)) - Fix a mistake in registration_shared_secret_path docs. ([\#15078](https://github.com/matrix-org/synapse/issues/15078)) - Refer to a more recent blog post on the [Database Maintenance Tools](https://matrix-org.github.io/synapse/latest/usage/administration/database_maintenance_tools.html) page. Contributed by @jahway603. ([\#15083](https://github.com/matrix-org/synapse/issues/15083)) @@ -46,7 +45,7 @@ Internal Changes - Add a class `UnpersistedEventContext` to allow for the batching up of storing state groups. ([\#14675](https://github.com/matrix-org/synapse/issues/14675)) - Add a check to ensure that locked dependencies have source distributions available. ([\#14742](https://github.com/matrix-org/synapse/issues/14742)) - Tweak comment on `_is_local_room_accessible` as part of room visibility in `/hierarchy` to clarify the condition for a room being visible. ([\#14834](https://github.com/matrix-org/synapse/issues/14834)) -- Prevent 'WARNING: there is already a transaction in progress' lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840)) +- Prevent `WARNING: there is already a transaction in progress` lines appearing in PostgreSQL's logs on some occasions. ([\#14840](https://github.com/matrix-org/synapse/issues/14840)) - Use `StrCollection` to avoid potential bugs with `Collection[str]`. ([\#14929](https://github.com/matrix-org/synapse/issues/14929)) - Improve performance of `/sync` in a few situations. ([\#14973](https://github.com/matrix-org/synapse/issues/14973)) - Limit concurrent event creation for a room to avoid state resolution when sending bursts of events to a local room. ([\#14977](https://github.com/matrix-org/synapse/issues/14977)) From 647ff3ef65e7a54b2719755802b4e6f2f45f5eb6 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Wed, 22 Feb 2023 11:07:28 +0000 Subject: [PATCH 222/344] Remove unused `room_alias` field from `/createRoom` response (#15093) * Change `create_room` return type * Don't return room alias from /createRoom * Update other callsites * Fix up mypy complaints It looks like new_room_user_id is None iff new_room_id is None. It's a shame we haven't expressed this in a way that mypy can understand. 
* Changelog --- changelog.d/15093.bugfix | 1 + synapse/handlers/register.py | 4 +- synapse/handlers/room.py | 38 +++++++++---------- synapse/module_api/__init__.py | 6 +-- synapse/rest/client/room.py | 4 +- .../server_notices/server_notices_manager.py | 3 +- tests/storage/test_cleanup_extrems.py | 8 ++-- tests/storage/test_event_metrics.py | 3 +- tests/storage/test_receipts.py | 10 +++-- tests/test_federation.py | 2 +- 10 files changed, 40 insertions(+), 39 deletions(-) create mode 100644 changelog.d/15093.bugfix diff --git a/changelog.d/15093.bugfix b/changelog.d/15093.bugfix new file mode 100644 index 000000000000..00f1c1939171 --- /dev/null +++ b/changelog.d/15093.bugfix @@ -0,0 +1 @@ +Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c611efb760e4..e4e506e62c5e 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -476,7 +476,7 @@ async def _create_and_join_rooms(self, user_id: str) -> None: # create room expects the localpart of the room alias config["room_alias_name"] = room_alias.localpart - info, _ = await room_creation_handler.create_room( + room_id, _, _ = await room_creation_handler.create_room( fake_requester, config=config, ratelimit=False, @@ -490,7 +490,7 @@ async def _create_and_join_rooms(self, user_id: str) -> None: user_id, authenticated_entity=self._server_name ), target=UserID.from_string(user_id), - room_id=info["room_id"], + room_id=room_id, # Since it was just created, there are no remote hosts. remote_room_hosts=[], action="join", diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 837dabb3b779..37c87c8351d5 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -690,13 +690,14 @@ async def create_room( config: JsonDict, ratelimit: bool = True, creator_join_profile: Optional[JsonDict] = None, - ) -> Tuple[dict, int]: + ) -> Tuple[str, Optional[RoomAlias], int]: """Creates a new room. Args: - requester: - The user who requested the room creation. - config : A dict of configuration options. + requester: The user who requested the room creation. + config: A dict of configuration options. This will be the body of + a /createRoom request; see + https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom ratelimit: set to False to disable the rate limiter creator_join_profile: @@ -707,14 +708,17 @@ async def create_room( `avatar_url` and/or `displayname`. Returns: - First, a dict containing the keys `room_id` and, if an alias - was, requested, `room_alias`. Secondly, the stream_id of the - last persisted event. + A 3-tuple containing: + - the room ID; + - if requested, the room alias, otherwise None; and + - the `stream_id` of the last persisted event. Raises: - SynapseError if the room ID couldn't be stored, 3pid invitation config - validation failed, or something went horribly wrong. - ResourceLimitError if server is blocked to some resource being - exceeded + SynapseError: + if the room ID couldn't be stored, 3pid invitation config + validation failed, or something went horribly wrong. 
+ ResourceLimitError: + if server is blocked to some resource being + exceeded """ user_id = requester.user.to_string() @@ -1024,11 +1028,6 @@ async def create_room( last_sent_event_id = member_event_id depth += 1 - result = {"room_id": room_id} - - if room_alias: - result["room_alias"] = room_alias.to_string() - # Always wait for room creation to propagate before returning await self._replication.wait_for_stream_position( self.hs.config.worker.events_shard_config.get_instance(room_id), @@ -1036,7 +1035,7 @@ async def create_room( last_stream_id, ) - return result, last_stream_id + return room_id, room_alias, last_stream_id async def _send_events_for_new_room( self, @@ -1825,7 +1824,7 @@ async def shutdown_room( new_room_user_id, authenticated_entity=requester_user_id ) - info, stream_id = await self._room_creation_handler.create_room( + new_room_id, _, stream_id = await self._room_creation_handler.create_room( room_creator_requester, config={ "preset": RoomCreationPreset.PUBLIC_CHAT, @@ -1834,7 +1833,6 @@ async def shutdown_room( }, ratelimit=False, ) - new_room_id = info["room_id"] logger.info( "Shutting down room %r, joining to new room: %r", room_id, new_room_id @@ -1887,6 +1885,7 @@ async def shutdown_room( # Join users to new room if new_room_user_id: + assert new_room_id is not None await self.room_member_handler.update_membership( requester=target_requester, target=target_requester.user, @@ -1919,6 +1918,7 @@ async def shutdown_room( aliases_for_room = await self.store.get_aliases_for_room(room_id) + assert new_room_id is not None await self.store.update_aliases_for_room( room_id, new_room_id, requester_user_id ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index d22dd19d388a..1964276a54fb 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -1576,14 +1576,14 @@ async def create_room( ) requester = create_requester(user_id) - room_id_and_alias, _ = await self._hs.get_room_creation_handler().create_room( + room_id, room_alias, _ = await self._hs.get_room_creation_handler().create_room( requester=requester, config=config, ratelimit=ratelimit, creator_join_profile=creator_join_profile, ) - - return room_id_and_alias["room_id"], room_id_and_alias.get("room_alias", None) + room_alias_str = room_alias.to_string() if room_alias else None + return room_id, room_alias_str async def set_displayname( self, diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index d0db85cca77f..14b04810a191 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -160,11 +160,11 @@ def on_PUT( async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) - info, _ = await self._room_creation_handler.create_room( + room_id, _, _ = await self._room_creation_handler.create_room( requester, self.get_room_config(request) ) - return 200, info + return 200, {"room_id": room_id} def get_room_config(self, request: Request) -> JsonDict: user_supplied_config = parse_json_object_from_request(request) diff --git a/synapse/server_notices/server_notices_manager.py b/synapse/server_notices/server_notices_manager.py index 564e3705c253..9732dbdb6e66 100644 --- a/synapse/server_notices/server_notices_manager.py +++ b/synapse/server_notices/server_notices_manager.py @@ -178,7 +178,7 @@ async def get_or_create_notice_room_for_user(self, user_id: str) -> str: "avatar_url": self._config.servernotices.server_notices_mxid_avatar_url, } - info, _ = await 
self._room_creation_handler.create_room( + room_id, _, _ = await self._room_creation_handler.create_room( requester, config={ "preset": RoomCreationPreset.PRIVATE_CHAT, @@ -188,7 +188,6 @@ async def get_or_create_notice_room_for_user(self, user_id: str) -> str: ratelimit=False, creator_join_profile=join_profile, ) - room_id = info["room_id"] self.maybe_get_notice_room_for_user.invalidate((user_id,)) diff --git a/tests/storage/test_cleanup_extrems.py b/tests/storage/test_cleanup_extrems.py index d570684c9926..7de109966d61 100644 --- a/tests/storage/test_cleanup_extrems.py +++ b/tests/storage/test_cleanup_extrems.py @@ -43,8 +43,9 @@ def prepare( # Create a test user and room self.user = UserID("alice", "test") self.requester = create_requester(self.user) - info, _ = self.get_success(self.room_creator.create_room(self.requester, {})) - self.room_id = info["room_id"] + self.room_id, _, _ = self.get_success( + self.room_creator.create_room(self.requester, {}) + ) def run_background_update(self) -> None: """Re run the background update to clean up the extremities.""" @@ -275,10 +276,9 @@ def prepare( self.user = UserID.from_string(self.register_user("user1", "password")) self.token1 = self.login("user1", "password") self.requester = create_requester(self.user) - info, _ = self.get_success( + self.room_id, _, _ = self.get_success( self.room_creator.create_room(self.requester, {"visibility": "public"}) ) - self.room_id = info["room_id"] self.event_creator = homeserver.get_event_creation_handler() homeserver.config.consent.user_consent_version = self.CONSENT_VERSION diff --git a/tests/storage/test_event_metrics.py b/tests/storage/test_event_metrics.py index a91411168cdd..6897addbd358 100644 --- a/tests/storage/test_event_metrics.py +++ b/tests/storage/test_event_metrics.py @@ -33,8 +33,7 @@ def test_exposed_to_prometheus(self) -> None: events = [(3, 2), (6, 2), (4, 6)] for event_count, extrems in events: - info, _ = self.get_success(room_creator.create_room(requester, {})) - room_id = info["room_id"] + room_id, _, _ = self.get_success(room_creator.create_room(requester, {})) last_event = None diff --git a/tests/storage/test_receipts.py b/tests/storage/test_receipts.py index 12c17f10731d..1b52eef23fca 100644 --- a/tests/storage/test_receipts.py +++ b/tests/storage/test_receipts.py @@ -50,12 +50,14 @@ def prepare( self.otherRequester = create_requester(self.otherUser) # Create a test room - info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) - self.room_id1 = info["room_id"] + self.room_id1, _, _ = self.get_success( + self.room_creator.create_room(self.ourRequester, {}) + ) # Create a second test room - info, _ = self.get_success(self.room_creator.create_room(self.ourRequester, {})) - self.room_id2 = info["room_id"] + self.room_id2, _, _ = self.get_success( + self.room_creator.create_room(self.ourRequester, {}) + ) # Join the second user to the first room memberEvent, memberEventContext = self.get_success( diff --git a/tests/test_federation.py b/tests/test_federation.py index 82dfd88b9907..46d2f99eacc6 100644 --- a/tests/test_federation.py +++ b/tests/test_federation.py @@ -47,7 +47,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: room_creator.create_room( our_user, room_creator._presets_dict["public_chat"], ratelimit=False ) - )[0]["room_id"] + )[0] self.store = self.hs.get_datastores().main From 91f8de7b5601495589d47f236d5c5cc264078424 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> 
Date: Wed, 22 Feb 2023 16:05:34 +0000 Subject: [PATCH 223/344] Clarify the workers that the ThirdPartyRules' `on_new_event` callback will run on (#15071) --- changelog.d/15071.doc | 1 + docs/modules/third_party_rules_callbacks.md | 3 +++ 2 files changed, 4 insertions(+) create mode 100644 changelog.d/15071.doc diff --git a/changelog.d/15071.doc b/changelog.d/15071.doc new file mode 100644 index 000000000000..7fbaba3e8cf0 --- /dev/null +++ b/changelog.d/15071.doc @@ -0,0 +1 @@ +Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. \ No newline at end of file diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md index e1a5b6524fb4..888e43bd1051 100644 --- a/docs/modules/third_party_rules_callbacks.md +++ b/docs/modules/third_party_rules_callbacks.md @@ -146,6 +146,9 @@ Note that this callback is called when the event has already been processed and into the room, which means this callback cannot be used to deny persisting the event. To deny an incoming event, see [`check_event_for_spam`](spam_checker_callbacks.md#check_event_for_spam) instead. +For any given event, this callback will be called on every worker process, even if that worker will not end up +acting on that event. This callback will not be called for events that are marked as rejected. + If multiple modules implement this callback, Synapse runs them all in order. ### `check_can_shutdown_room` From 6def779a1a7c49cd10e635986fbfa1e422eb20bf Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Wed, 22 Feb 2023 20:29:39 +0100 Subject: [PATCH 224/344] Use `json.dump` in `FileExfiltrationWriter` (#15095) To directly write to the open file, instead of writing to an in-memory string first. --- changelog.d/15095.misc | 1 + synapse/app/admin_cmd.py | 16 ++++++++-------- 2 files changed, 9 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15095.misc diff --git a/changelog.d/15095.misc b/changelog.d/15095.misc new file mode 100644 index 000000000000..a2fafe2fffd8 --- /dev/null +++ b/changelog.d/15095.misc @@ -0,0 +1 @@ +Refactor writing json data in `FileExfiltrationWriter`. 
\ No newline at end of file diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index ad51f33165e3..5003777f0de9 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -149,7 +149,7 @@ def write_events(self, room_id: str, events: List[EventBase]) -> None: with open(events_file, "a") as f: for event in events: - print(json.dumps(event.get_pdu_json()), file=f) + json.dump(event.get_pdu_json(), fp=f) def write_state( self, room_id: str, event_id: str, state: StateMap[EventBase] @@ -162,7 +162,7 @@ def write_state( with open(event_file, "a") as f: for event in state.values(): - print(json.dumps(event.get_pdu_json()), file=f) + json.dump(event.get_pdu_json(), fp=f) def write_invite( self, room_id: str, event: EventBase, state: StateMap[EventBase] @@ -178,7 +178,7 @@ def write_invite( with open(invite_state, "a") as f: for event in state.values(): - print(json.dumps(event), file=f) + json.dump(event, fp=f) def write_knock( self, room_id: str, event: EventBase, state: StateMap[EventBase] @@ -194,7 +194,7 @@ def write_knock( with open(knock_state, "a") as f: for event in state.values(): - print(json.dumps(event), file=f) + json.dump(event, fp=f) def write_profile(self, profile: JsonDict) -> None: user_directory = os.path.join(self.base_directory, "user_data") @@ -202,7 +202,7 @@ def write_profile(self, profile: JsonDict) -> None: profile_file = os.path.join(user_directory, "profile") with open(profile_file, "a") as f: - print(json.dumps(profile), file=f) + json.dump(profile, fp=f) def write_devices(self, devices: List[JsonDict]) -> None: user_directory = os.path.join(self.base_directory, "user_data") @@ -211,7 +211,7 @@ def write_devices(self, devices: List[JsonDict]) -> None: for device in devices: with open(device_file, "a") as f: - print(json.dumps(device), file=f) + json.dump(device, fp=f) def write_connections(self, connections: List[JsonDict]) -> None: user_directory = os.path.join(self.base_directory, "user_data") @@ -220,7 +220,7 @@ def write_connections(self, connections: List[JsonDict]) -> None: for connection in connections: with open(connection_file, "a") as f: - print(json.dumps(connection), file=f) + json.dump(connection, fp=f) def write_account_data( self, file_name: str, account_data: Mapping[str, JsonDict] @@ -233,7 +233,7 @@ def write_account_data( account_data_file = os.path.join(account_data_directory, file_name) with open(account_data_file, "a") as f: - print(json.dumps(account_data), file=f) + json.dump(account_data, fp=f) def finished(self) -> str: return self.base_directory From 4ed08ff72ef8f1abf85ab22de1e51b570f67b27e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 22 Feb 2023 14:37:18 -0500 Subject: [PATCH 225/344] Tighten the default rate limit of creating new devices. (#15135) --- changelog.d/15135.misc | 1 + docs/usage/configuration/config_documentation.md | 6 +++--- synapse/config/ratelimiting.py | 13 +++++++++++-- 3 files changed, 15 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15135.misc diff --git a/changelog.d/15135.misc b/changelog.d/15135.misc new file mode 100644 index 000000000000..25c4dbffe134 --- /dev/null +++ b/changelog.d/15135.misc @@ -0,0 +1 @@ +Tighten the login ratelimit defaults. 
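For context on the defaults tightened in this commit (`per_second: 0.003`, `burst_count: 5`, as shown in the config changes below): `per_second` is the sustained refill rate and `burst_count` is how many requests may go through back-to-back, so the new values allow roughly five immediate login attempts and then about one further attempt every 1/0.003 ≈ 333 seconds (~5.5 minutes). The snippet below is a minimal token-bucket sketch of those semantics; it is an illustration only, not Synapse's actual `Ratelimiter` implementation.

# Toy model of the new login rate limit defaults (per_second=0.003,
# burst_count=5). Illustrative only; not Synapse's Ratelimiter.
import time


class TokenBucket:
    def __init__(self, per_second: float, burst_count: int) -> None:
        self.per_second = per_second
        self.burst_count = burst_count
        self.tokens = float(burst_count)
        self.last = time.monotonic()

    def allow(self) -> bool:
        now = time.monotonic()
        # Refill at `per_second` tokens per second, capped at `burst_count`.
        self.tokens = min(
            self.burst_count, self.tokens + (now - self.last) * self.per_second
        )
        self.last = now
        if self.tokens >= 1:
            self.tokens -= 1
            return True
        return False


bucket = TokenBucket(per_second=0.003, burst_count=5)
# The first five attempts pass; the sixth is throttled (refill would take ~333s).
print([bucket.allow() for _ in range(6)])  # [True, True, True, True, True, False]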
diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 58c695568972..ab1f9f4963c9 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1518,11 +1518,11 @@ rc_registration_token_validity: This option specifies several limits for login: * `address` ratelimits login requests based on the client's IP - address. Defaults to `per_second: 0.17`, `burst_count: 3`. + address. Defaults to `per_second: 0.003`, `burst_count: 5`. * `account` ratelimits login requests based on the account the - client is attempting to log into. Defaults to `per_second: 0.17`, - `burst_count: 3`. + client is attempting to log into. Defaults to `per_second: 0.03`, + `burst_count: 5`. * `failed_attempts` ratelimits login requests based on the account the client is attempting to log into, based on the amount of failed login diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index 5c13fe428a70..b733fac6170a 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -87,9 +87,18 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: defaults={"per_second": 0.1, "burst_count": 5}, ) + # It is reasonable to login with a bunch of devices at once (i.e. when + # setting up an account), but it is *not* valid to continually be + # logging into new devices. rc_login_config = config.get("rc_login", {}) - self.rc_login_address = RatelimitSettings(rc_login_config.get("address", {})) - self.rc_login_account = RatelimitSettings(rc_login_config.get("account", {})) + self.rc_login_address = RatelimitSettings( + rc_login_config.get("address", {}), + defaults={"per_second": 0.003, "burst_count": 5}, + ) + self.rc_login_account = RatelimitSettings( + rc_login_config.get("account", {}), + defaults={"per_second": 0.003, "burst_count": 5}, + ) self.rc_login_failed_attempts = RatelimitSettings( rc_login_config.get("failed_attempts", {}) ) From 9bb2eac71962970d02842bca441f4bcdbbf93a11 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 22 Feb 2023 15:29:09 -0500 Subject: [PATCH 226/344] Bump black from 22.12.0 to 23.1.0 (#15103) --- changelog.d/15103.misc | 1 + poetry.lock | 42 ++++++++++++------- stubs/sortedcontainers/sortedlist.pyi | 1 - synapse/_scripts/register_new_matrix_user.py | 2 - synapse/_scripts/synapse_port_db.py | 1 - synapse/_scripts/synctl.py | 1 - synapse/app/_base.py | 2 +- synapse/app/complement_fork_starter.py | 2 +- synapse/app/generic_worker.py | 1 - synapse/app/homeserver.py | 1 - synapse/config/consent.py | 1 - synapse/config/database.py | 1 - synapse/config/homeserver.py | 1 - synapse/config/ratelimiting.py | 1 - synapse/config/repository.py | 1 - synapse/config/server.py | 1 - synapse/config/tls.py | 1 - synapse/crypto/keyring.py | 2 +- synapse/events/third_party_rules.py | 2 - synapse/federation/send_queue.py | 4 +- synapse/handlers/appservice.py | 2 +- synapse/handlers/auth.py | 2 - synapse/handlers/directory.py | 8 ++-- synapse/handlers/e2e_room_keys.py | 1 - synapse/handlers/event_auth.py | 1 - synapse/handlers/initial_sync.py | 1 - synapse/handlers/presence.py | 2 - synapse/handlers/room.py | 8 ++-- synapse/handlers/room_batch.py | 2 +- synapse/handlers/sync.py | 1 - synapse/logging/opentracing.py | 1 + synapse/metrics/__init__.py | 1 - synapse/metrics/_gc.py | 1 - synapse/push/bulk_push_rule_evaluator.py | 1 - synapse/replication/http/account_data.py | 1 - 
synapse/replication/http/devices.py | 1 - synapse/replication/tcp/redis.py | 1 - synapse/replication/tcp/streams/events.py | 1 - synapse/rest/admin/rooms.py | 4 -- synapse/rest/admin/users.py | 8 +++- synapse/rest/client/auth.py | 1 - synapse/rest/client/filter.py | 1 - synapse/rest/client/register.py | 18 ++++---- synapse/rest/media/v1/_base.py | 1 - synapse/rest/media/v1/thumbnailer.py | 1 - synapse/storage/databases/main/deviceinbox.py | 5 +-- synapse/storage/databases/main/devices.py | 4 +- .../storage/databases/main/e2e_room_keys.py | 2 +- .../storage/databases/main/end_to_end_keys.py | 8 ++-- .../databases/main/event_federation.py | 1 - synapse/storage/databases/main/events.py | 1 - .../databases/main/events_bg_updates.py | 4 +- .../storage/databases/main/events_worker.py | 2 +- .../databases/main/media_repository.py | 1 - synapse/storage/databases/main/pusher.py | 3 -- synapse/storage/databases/main/receipts.py | 1 - synapse/storage/databases/main/room.py | 1 - synapse/storage/databases/main/search.py | 2 - synapse/storage/databases/main/state.py | 1 - synapse/storage/databases/main/stats.py | 2 +- synapse/storage/databases/main/stream.py | 1 + .../storage/databases/main/transactions.py | 1 - .../storage/databases/main/user_directory.py | 1 - synapse/storage/databases/state/bg_updates.py | 1 - synapse/storage/databases/state/store.py | 7 +--- synapse/storage/prepare_database.py | 4 +- synapse/types/state.py | 2 +- synapse/util/caches/__init__.py | 1 - synapse/util/check_dependencies.py | 2 +- synapse/util/patch_inline_callbacks.py | 1 - synmark/__main__.py | 2 - synmark/suites/logging.py | 1 - tests/federation/test_complexity.py | 4 -- tests/federation/test_federation_server.py | 1 - tests/handlers/test_sso.py | 1 - tests/handlers/test_stats.py | 1 - tests/http/federation/test_srv_resolver.py | 1 - tests/http/test_client.py | 2 +- tests/push/test_bulk_push_rule_evaluator.py | 1 - tests/push/test_email.py | 2 - .../replication/slave/storage/test_events.py | 1 - tests/rest/admin/test_device.py | 3 -- tests/rest/admin/test_media.py | 5 --- tests/rest/admin/test_room.py | 1 - tests/rest/admin/test_server_notice.py | 1 - tests/rest/client/test_account.py | 4 -- tests/rest/client/test_auth.py | 2 - tests/rest/client/test_capabilities.py | 1 - tests/rest/client/test_consent.py | 1 - tests/rest/client/test_directory.py | 1 - tests/rest/client/test_ephemeral_message.py | 1 - tests/rest/client/test_events.py | 3 -- tests/rest/client/test_filter.py | 1 - tests/rest/client/test_login.py | 2 - tests/rest/client/test_login_token_request.py | 1 - tests/rest/client/test_presence.py | 1 - tests/rest/client/test_profile.py | 3 -- tests/rest/client/test_register.py | 4 -- tests/rest/client/test_rendezvous.py | 1 - tests/rest/client/test_rooms.py | 14 +------ tests/rest/client/test_sync.py | 3 -- tests/rest/client/test_third_party_rules.py | 3 ++ tests/rest/media/test_media_retention.py | 1 - tests/rest/media/v1/test_media_storage.py | 3 -- tests/rest/media/v1/test_url_preview.py | 3 -- tests/server_notices/test_consent.py | 2 - .../databases/main/test_deviceinbox.py | 1 - tests/storage/databases/main/test_receipts.py | 2 +- tests/storage/databases/main/test_room.py | 1 - tests/storage/test_client_ips.py | 1 - tests/storage/test_event_chain.py | 2 - tests/storage/test_event_federation.py | 2 +- tests/storage/test_event_push_actions.py | 2 +- tests/storage/test_purge.py | 1 - tests/storage/test_roommember.py | 3 -- tests/storage/test_state.py | 30 ++++++------- tests/test_mau.py | 1 - 117 files changed, 
108 insertions(+), 218 deletions(-) create mode 100644 changelog.d/15103.misc diff --git a/changelog.d/15103.misc b/changelog.d/15103.misc new file mode 100644 index 000000000000..65322498c90e --- /dev/null +++ b/changelog.d/15103.misc @@ -0,0 +1 @@ +Bump black from 22.12.0 to 23.1.0. diff --git a/poetry.lock b/poetry.lock index 4d724ab782a5..8ffdab7a222f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -90,32 +90,46 @@ typecheck = ["mypy"] [[package]] name = "black" -version = "22.12.0" +version = "23.1.0" description = "The uncompromising code formatter." category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, - {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, - {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, - {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, - {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, - {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, - {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, - {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, - {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, - {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, - {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, - {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, + {file = "black-23.1.0-cp310-cp310-macosx_10_16_arm64.whl", hash = "sha256:b6a92a41ee34b883b359998f0c8e6eb8e99803aa8bf3123bf2b2e6fec505a221"}, + {file = "black-23.1.0-cp310-cp310-macosx_10_16_universal2.whl", hash = "sha256:57c18c5165c1dbe291d5306e53fb3988122890e57bd9b3dcb75f967f13411a26"}, + {file = "black-23.1.0-cp310-cp310-macosx_10_16_x86_64.whl", hash = "sha256:9880d7d419bb7e709b37e28deb5e68a49227713b623c72b2b931028ea65f619b"}, + {file = "black-23.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e6663f91b6feca5d06f2ccd49a10f254f9298cc1f7f49c46e498a0771b507104"}, + {file = "black-23.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9afd3f493666a0cd8f8df9a0200c6359ac53940cbde049dcb1a7eb6ee2dd7074"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_arm64.whl", hash = "sha256:bfffba28dc52a58f04492181392ee380e95262af14ee01d4bc7bb1b1c6ca8d27"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_universal2.whl", hash = "sha256:c1c476bc7b7d021321e7d93dc2cbd78ce103b84d5a4cf97ed535fbc0d6660648"}, + {file = "black-23.1.0-cp311-cp311-macosx_10_16_x86_64.whl", hash = 
"sha256:382998821f58e5c8238d3166c492139573325287820963d2f7de4d518bd76958"}, + {file = "black-23.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf649fda611c8550ca9d7592b69f0637218c2369b7744694c5e4902873b2f3a"}, + {file = "black-23.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:121ca7f10b4a01fd99951234abdbd97728e1240be89fde18480ffac16503d481"}, + {file = "black-23.1.0-cp37-cp37m-macosx_10_16_x86_64.whl", hash = "sha256:a8471939da5e824b891b25751955be52ee7f8a30a916d570a5ba8e0f2eb2ecad"}, + {file = "black-23.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8178318cb74f98bc571eef19068f6ab5613b3e59d4f47771582f04e175570ed8"}, + {file = "black-23.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:a436e7881d33acaf2536c46a454bb964a50eff59b21b51c6ccf5a40601fbef24"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_arm64.whl", hash = "sha256:a59db0a2094d2259c554676403fa2fac3473ccf1354c1c63eccf7ae65aac8ab6"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_universal2.whl", hash = "sha256:0052dba51dec07ed029ed61b18183942043e00008ec65d5028814afaab9a22fd"}, + {file = "black-23.1.0-cp38-cp38-macosx_10_16_x86_64.whl", hash = "sha256:49f7b39e30f326a34b5c9a4213213a6b221d7ae9d58ec70df1c4a307cf2a1580"}, + {file = "black-23.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:162e37d49e93bd6eb6f1afc3e17a3d23a823042530c37c3c42eeeaf026f38468"}, + {file = "black-23.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b70eb40a78dfac24842458476135f9b99ab952dd3f2dab738c1881a9b38b753"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_arm64.whl", hash = "sha256:a29650759a6a0944e7cca036674655c2f0f63806ddecc45ed40b7b8aa314b651"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_universal2.whl", hash = "sha256:bb460c8561c8c1bec7824ecbc3ce085eb50005883a6203dcfb0122e95797ee06"}, + {file = "black-23.1.0-cp39-cp39-macosx_10_16_x86_64.whl", hash = "sha256:c91dfc2c2a4e50df0026f88d2215e166616e0c80e86004d0003ece0488db2739"}, + {file = "black-23.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a951cc83ab535d248c89f300eccbd625e80ab880fbcfb5ac8afb5f01a258ac9"}, + {file = "black-23.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:0680d4380db3719ebcfb2613f34e86c8e6d15ffeabcf8ec59355c5e7b85bb555"}, + {file = "black-23.1.0-py3-none-any.whl", hash = "sha256:7a0f701d314cfa0896b9001df70a530eb2472babb76086344e688829efd97d32"}, + {file = "black-23.1.0.tar.gz", hash = "sha256:b0bd97bea8903f5a2ba7219257a44e3f1f9d00073d6cc1add68f0beec69692ac"}, ] [package.dependencies] click = ">=8.0.0" mypy-extensions = ">=0.4.3" +packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" -tomli = {version = ">=1.1.0", markers = "python_full_version < \"3.11.0a7\""} +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\" and implementation_name == \"cpython\""} typing-extensions = {version = ">=3.10.0.0", markers = "python_version < \"3.10\""} diff --git a/stubs/sortedcontainers/sortedlist.pyi b/stubs/sortedcontainers/sortedlist.pyi index 1fe1a136f190..0e745c0a79ac 100644 --- a/stubs/sortedcontainers/sortedlist.pyi +++ b/stubs/sortedcontainers/sortedlist.pyi @@ -29,7 +29,6 @@ _Repr = Callable[[], str] def recursive_repr(fillvalue: str = ...) -> Callable[[_Repr], _Repr]: ... class SortedList(MutableSequence[_T]): - DEFAULT_LOAD_FACTOR: int = ... 
def __init__( self, diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index 2b74a4016687..19ca399d446a 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -47,7 +47,6 @@ def request_registration( _print: Callable[[str], None] = print, exit: Callable[[int], None] = sys.exit, ) -> None: - url = "%s/_synapse/admin/v1/register" % (server_location.rstrip("/"),) # Get the nonce @@ -154,7 +153,6 @@ def register_new_user( def main() -> None: - logging.captureWarnings(True) parser = argparse.ArgumentParser( diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 0d35e0af8fcd..2c9cbf8b275b 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -1205,7 +1205,6 @@ def render(self, force: bool = False) -> None: if self.finished: status = "Time spent: %s (Done!)" % (duration_str,) else: - if self.total_processed > 0: left = float(self.total_remaining) / self.total_processed diff --git a/synapse/_scripts/synctl.py b/synapse/_scripts/synctl.py index b4c96ad7f3e2..077b90935e27 100755 --- a/synapse/_scripts/synctl.py +++ b/synapse/_scripts/synctl.py @@ -167,7 +167,6 @@ def stop(pidfile: str, app: str) -> Optional[int]: def main() -> None: - parser = argparse.ArgumentParser() parser.add_argument( diff --git a/synapse/app/_base.py b/synapse/app/_base.py index a5aa2185a28e..28062dd69d3a 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -213,7 +213,7 @@ def handle_startup_exception(e: Exception) -> NoReturn: def redirect_stdio_to_logs() -> None: streams = [("stdout", LogLevel.info), ("stderr", LogLevel.error)] - for (stream, level) in streams: + for stream, level in streams: oldStream = getattr(sys, stream) loggingFile = LoggingFile( logger=twisted.logger.Logger(namespace=stream), diff --git a/synapse/app/complement_fork_starter.py b/synapse/app/complement_fork_starter.py index 920538f44df2..c8dc3f9d76a1 100644 --- a/synapse/app/complement_fork_starter.py +++ b/synapse/app/complement_fork_starter.py @@ -219,7 +219,7 @@ def handle_signal(signum: int, frame: Optional[FrameType]) -> None: # memory space and don't need to repeat the work of loading the code! # Instead of using fork() directly, we use the multiprocessing library, # which uses fork() on Unix platforms. - for (func, worker_args) in zip(worker_functions, args_by_worker): + for func, worker_args in zip(worker_functions, args_by_worker): process = multiprocessing.Process( target=_worker_entrypoint, args=(func, proxy_reactor, worker_args) ) diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 946f3a380744..0dec24369a49 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -157,7 +157,6 @@ class GenericWorkerServer(HomeServer): DATASTORE_CLASS = GenericWorkerSlavedStore # type: ignore def _listen_http(self, listener_config: ListenerConfig) -> None: - assert listener_config.http_options is not None # We always include a health resource. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 6176a70eb2a0..b8830b1a9c16 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -321,7 +321,6 @@ def setup(config_options: List[str]) -> SynapseHomeServer: and not config.registration.registrations_require_3pid and not config.registration.registration_requires_token ): - raise ConfigError( "You have enabled open registration without any verification. 
This is a known vector for " "spam and abuse. If you would like to allow public registration, please consider adding email, " diff --git a/synapse/config/consent.py b/synapse/config/consent.py index be74609dc42e..5bfd0cbb7163 100644 --- a/synapse/config/consent.py +++ b/synapse/config/consent.py @@ -22,7 +22,6 @@ class ConsentConfig(Config): - section = "consent" def __init__(self, *args: Any): diff --git a/synapse/config/database.py b/synapse/config/database.py index 928fec8dfe1b..596d8769fe3c 100644 --- a/synapse/config/database.py +++ b/synapse/config/database.py @@ -154,7 +154,6 @@ def read_arguments(self, args: argparse.Namespace) -> None: logger.warning(NON_SQLITE_DATABASE_PATH_WARNING) def set_databasepath(self, database_path: str) -> None: - if database_path != ":memory:": database_path = self.abspath(database_path) diff --git a/synapse/config/homeserver.py b/synapse/config/homeserver.py index 4d2b298a70be..c205a78039b6 100644 --- a/synapse/config/homeserver.py +++ b/synapse/config/homeserver.py @@ -56,7 +56,6 @@ class HomeServerConfig(RootConfig): - config_classes = [ ModulesConfig, ServerConfig, diff --git a/synapse/config/ratelimiting.py b/synapse/config/ratelimiting.py index b733fac6170a..a5514e70a21d 100644 --- a/synapse/config/ratelimiting.py +++ b/synapse/config/ratelimiting.py @@ -46,7 +46,6 @@ class RatelimitConfig(Config): section = "ratelimiting" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - # Load the new-style messages config if it exists. Otherwise fall back # to the old method. if "rc_message" in config: diff --git a/synapse/config/repository.py b/synapse/config/repository.py index e4759711ed95..2da40c09f0e1 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -116,7 +116,6 @@ class ContentRepositoryConfig(Config): section = "media" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - # Only enable the media repo if either the media repo is enabled or the # current worker app is the media repo. if ( diff --git a/synapse/config/server.py b/synapse/config/server.py index d4ef9930b007..0e46b849cf06 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -735,7 +735,6 @@ def generate_config_section( listeners: Optional[List[dict]], **kwargs: Any, ) -> str: - _, bind_port = parse_and_validate_server_name(server_name) if bind_port is not None: unsecure_port = bind_port - 400 diff --git a/synapse/config/tls.py b/synapse/config/tls.py index 336fe3e0daaf..318270ebb8ac 100644 --- a/synapse/config/tls.py +++ b/synapse/config/tls.py @@ -30,7 +30,6 @@ class TlsConfig(Config): section = "tls" def read_config(self, config: JsonDict, **kwargs: Any) -> None: - self.tls_certificate_file = self.abspath(config.get("tls_certificate_path")) self.tls_private_key_file = self.abspath(config.get("tls_private_key_path")) diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index 86cd4af9bd5a..d710607c6367 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -399,7 +399,7 @@ async def _inner_fetch_key_requests( # We now convert the returned list of results into a map from server # name to key ID to FetchKeyResult, to return. 
to_return: Dict[str, Dict[str, FetchKeyResult]] = {} - for (request, results) in zip(deduped_requests, results_per_request): + for request, results in zip(deduped_requests, results_per_request): to_return_by_server = to_return.setdefault(request.server_name, {}) for key_id, key_result in results.items(): existing = to_return_by_server.get(key_id) diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 97c61cc2586b..9a25ed419bb7 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -78,7 +78,6 @@ def async_wrapper(f: Optional[Callable]) -> Optional[Callable[..., Awaitable]]: # correctly, we need to await its result. Therefore it doesn't make a lot of # sense to make it go through the run() wrapper. if f.__name__ == "check_event_allowed": - # We need to wrap check_event_allowed because its old form would return either # a boolean or a dict, but now we want to return the dict separately from the # boolean. @@ -100,7 +99,6 @@ async def wrap_check_event_allowed( return wrap_check_event_allowed if f.__name__ == "on_create_room": - # We need to wrap on_create_room because its old form would return a boolean # if the room creation is denied, but now we just want it to raise an # exception. diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index d720b5fd3fe2..3063df799099 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -314,7 +314,7 @@ async def get_replication_rows( # stream position. keyed_edus = {v: k for k, v in self.keyed_edu_changed.items()[i:j]} - for ((destination, edu_key), pos) in keyed_edus.items(): + for (destination, edu_key), pos in keyed_edus.items(): rows.append( ( pos, @@ -329,7 +329,7 @@ async def get_replication_rows( j = self.edus.bisect_right(to_token) + 1 edus = self.edus.items()[i:j] - for (pos, edu) in edus: + for pos, edu in edus: rows.append((pos, EduRow(edu))) # Sort rows based on pos diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index 5d1d21cdc8c6..ec3ab968e925 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -737,7 +737,7 @@ async def query_3pe( ) ret = [] - for (success, result) in results: + for success, result in results: if success: ret.extend(result) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index cf12b55d21e9..b12bc4c9a3a3 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -815,7 +815,6 @@ async def refresh_token( now_ms = self._clock.time_msec() if existing_token.expiry_ts is not None and existing_token.expiry_ts < now_ms: - raise SynapseError( HTTPStatus.FORBIDDEN, "The supplied refresh token has expired", @@ -2259,7 +2258,6 @@ async def check_3pid_auth( async def on_logged_out( self, user_id: str, device_id: Optional[str], access_token: str ) -> None: - # call all of the on_logged_out callbacks for callback in self.on_logged_out_callbacks: try: diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index a5798e9483ce..1fb23cc9bf51 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -497,9 +497,11 @@ async def edit_published_room_list( raise SynapseError(403, "Not allowed to publish room") # Check if publishing is blocked by a third party module - allowed_by_third_party_rules = await ( - self.third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility + allowed_by_third_party_rules = ( + await ( + 
self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) ) ) if not allowed_by_third_party_rules: diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 83f53ceb8891..50317ec753da 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -188,7 +188,6 @@ async def upload_room_keys( # XXX: perhaps we should use a finer grained lock here? async with self._upload_linearizer.queue(user_id): - # Check that the version we're trying to upload is the current version try: version_info = await self.store.get_e2e_room_keys_version_info(user_id) diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index 46dd63c3f00d..c508861b6a7b 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -236,7 +236,6 @@ async def check_restricted_join_rules( # in any of them. allowed_rooms = await self.get_rooms_that_allow_join(state_ids) if not await self.is_user_in_rooms(allowed_rooms, user_id): - # If this is a remote request, the user might be in an allowed room # that we do not know about. if get_domain_from_id(user_id) != self._server_name: diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index 1a29abde9839..aead0b44b9fd 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -124,7 +124,6 @@ async def _snapshot_all_rooms( as_client_event: bool = True, include_archived: bool = False, ) -> JsonDict: - memberships = [Membership.INVITE, Membership.JOIN] if include_archived: memberships.append(Membership.LEAVE) diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index 87af31aa2706..4ad223357384 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -777,7 +777,6 @@ async def _on_shutdown(self) -> None: ) if self.unpersisted_users_changes: - await self.store.update_presence( [ self.user_to_current_state[user_id] @@ -823,7 +822,6 @@ async def _update_states( now = self.clock.time_msec() with Measure(self.clock, "presence_update_states"): - # NOTE: We purposefully don't await between now and when we've # calculated what we want to do with the new states, to avoid races. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 37c87c8351d5..a26ec02284bf 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -868,9 +868,11 @@ async def create_room( ) # Check whether this visibility value is blocked by a third party module - allowed_by_third_party_rules = await ( - self.third_party_event_rules.check_visibility_can_be_modified( - room_id, visibility + allowed_by_third_party_rules = ( + await ( + self.third_party_event_rules.check_visibility_can_be_modified( + room_id, visibility + ) ) ) if not allowed_by_third_party_rules: diff --git a/synapse/handlers/room_batch.py b/synapse/handlers/room_batch.py index c73d2adaad47..5d4ca0e2d21a 100644 --- a/synapse/handlers/room_batch.py +++ b/synapse/handlers/room_batch.py @@ -374,7 +374,7 @@ async def persist_historical_events( # correct stream_ordering as they are backfilled (which decrements). # Events are sorted by (topological_ordering, stream_ordering) # where topological_ordering is just depth. 
- for (event, context) in reversed(events_to_persist): + for event, context in reversed(events_to_persist): # This call can't raise `PartialStateConflictError` since we forbid # use of the historical batch API during partial state await self.event_creation_handler.handle_new_client_event( diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index 4e4595312c4d..fd6d946c37f0 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1297,7 +1297,6 @@ async def unread_notifs_for_room_id( return RoomNotifCounts.empty() with Measure(self.clock, "unread_notifs_for_room_id"): - return await self.store.get_unread_event_push_actions_by_room_for_user( room_id, sync_config.user.to_string(), diff --git a/synapse/logging/opentracing.py b/synapse/logging/opentracing.py index 5aed71262fbd..c70eee649c57 100644 --- a/synapse/logging/opentracing.py +++ b/synapse/logging/opentracing.py @@ -524,6 +524,7 @@ def whitelisted_homeserver(destination: str) -> bool: # Start spans and scopes + # Could use kwargs but I want these to be explicit def start_active_span( operation_name: str, diff --git a/synapse/metrics/__init__.py b/synapse/metrics/__init__.py index b01372565d14..8ce58872293a 100644 --- a/synapse/metrics/__init__.py +++ b/synapse/metrics/__init__.py @@ -87,7 +87,6 @@ class LaterGauge(Collector): ] def collect(self) -> Iterable[Metric]: - g = GaugeMetricFamily(self.name, self.desc, labels=self.labels) try: diff --git a/synapse/metrics/_gc.py b/synapse/metrics/_gc.py index b7d47ce3e744..a22c4e5bbd1a 100644 --- a/synapse/metrics/_gc.py +++ b/synapse/metrics/_gc.py @@ -139,7 +139,6 @@ def _maybe_gc() -> None: class PyPyGCStats(Collector): def collect(self) -> Iterable[Metric]: - # @stats is a pretty-printer object with __str__() returning a nice table, # plus some fields that contain data from that table. # unfortunately, fields are pretty-printed themselves (i. e. '4.5MB'). 
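The remaining hunks in this commit are mechanical restyling from the black 23.1.0 upgrade. As a rough sketch (my own minimal example, not code from this repository), two of the changes visible throughout the diff are dropping redundant parentheses around tuple targets in `for` loops and removing blank lines at the top of function bodies:

# Minimal illustration of the restyling black 23.1.0 applies in this commit.
query_list = [("@alice:example.org", "DEV1"), ("@bob:example.org", None)]

# black 22.x left this form alone:
#     for (user_id, device_id) in query_list:
# black 23.x removes the redundant parentheses:
for user_id, device_id in query_list:
    print(user_id, device_id)


def lookup(device_id: str) -> str:
    # black 23.x also strips a leading blank line that was previously
    # permitted right here, at the top of a function body.
    return device_id.lower()


print(lookup("DEV1"))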
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 5fc38431babe..8f834be7742a 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -330,7 +330,6 @@ async def _action_for_event_by_user( context: EventContext, event_id_to_event: Mapping[str, EventBase], ) -> None: - if ( not event.internal_metadata.is_notifiable() or event.internal_metadata.is_historical() diff --git a/synapse/replication/http/account_data.py b/synapse/replication/http/account_data.py index 2374f810c94f..111ec07e64af 100644 --- a/synapse/replication/http/account_data.py +++ b/synapse/replication/http/account_data.py @@ -265,7 +265,6 @@ def __init__(self, hs: "HomeServer"): @staticmethod async def _serialize_payload(user_id: str, room_id: str, tag: str) -> JsonDict: # type: ignore[override] - return {} async def _handle_request( # type: ignore[override] diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index ecea6fc915c7..cc3929dcf565 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -195,7 +195,6 @@ def __init__(self, hs: "HomeServer"): async def _serialize_payload( # type: ignore[override] user_id: str, device_id: str, keys: JsonDict ) -> JsonDict: - return { "user_id": user_id, "device_id": device_id, diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index fd1c0ec6afa2..dfc061eb5e38 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -328,7 +328,6 @@ def __init__( outbound_redis_connection: txredisapi.ConnectionHandler, channel_names: List[str], ): - super().__init__( hs, uuid="subscriber", diff --git a/synapse/replication/tcp/streams/events.py b/synapse/replication/tcp/streams/events.py index 14b6705862ac..ad9b76071383 100644 --- a/synapse/replication/tcp/streams/events.py +++ b/synapse/replication/tcp/streams/events.py @@ -139,7 +139,6 @@ async def _update_function( current_token: Token, target_row_count: int, ) -> StreamUpdateResult: - # the events stream merges together three separate sources: # * new events # * current_state changes diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index 1d6e4982d7be..4de56bf13f31 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -75,7 +75,6 @@ def __init__(self, hs: "HomeServer"): async def on_DELETE( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: - requester = await self._auth.get_user_by_req(request) await assert_user_is_admin(self._auth, requester) @@ -144,7 +143,6 @@ def __init__(self, hs: "HomeServer"): async def on_GET( self, request: SynapseRequest, room_id: str ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self._auth, request) if not RoomID.is_valid(room_id): @@ -181,7 +179,6 @@ def __init__(self, hs: "HomeServer"): async def on_GET( self, request: SynapseRequest, delete_id: str ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self._auth, request) delete_status = self._pagination_handler.get_delete_status(delete_id) @@ -438,7 +435,6 @@ async def on_GET( class JoinRoomAliasServlet(ResolveRoomIdMixin, RestServlet): - PATTERNS = admin_patterns("/join/(?P[^/]*)$") def __init__(self, hs: "HomeServer"): diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 0c0bf540b9ac..7cc4db20d626 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -683,8 +683,12 @@ async def on_POST(self, request: 
SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) if self.account_activity_handler.on_legacy_admin_request_callback: - expiration_ts = await ( - self.account_activity_handler.on_legacy_admin_request_callback(request) + expiration_ts = ( + await ( + self.account_activity_handler.on_legacy_admin_request_callback( + request + ) + ) ) else: body = parse_json_object_from_request(request) diff --git a/synapse/rest/client/auth.py b/synapse/rest/client/auth.py index eb77337044da..276a1b405d44 100644 --- a/synapse/rest/client/auth.py +++ b/synapse/rest/client/auth.py @@ -97,7 +97,6 @@ async def on_GET(self, request: SynapseRequest, stagetype: str) -> None: return None async def on_POST(self, request: Request, stagetype: str) -> None: - session = parse_string(request, "session") if not session: raise SynapseError(400, "No session supplied") diff --git a/synapse/rest/client/filter.py b/synapse/rest/client/filter.py index cc1c2f973140..236199897ce0 100644 --- a/synapse/rest/client/filter.py +++ b/synapse/rest/client/filter.py @@ -79,7 +79,6 @@ def __init__(self, hs: "HomeServer"): async def on_POST( self, request: SynapseRequest, user_id: str ) -> Tuple[int, JsonDict]: - target_user = UserID.from_string(user_id) requester = await self.auth.get_user_by_req(request) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 3cb1e7e37501..bce806f2bbb4 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -628,10 +628,12 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if not password_hash: raise SynapseError(400, "Missing params: password", Codes.MISSING_PARAM) - desired_username = await ( - self.password_auth_provider.get_username_for_registration( - auth_result, - params, + desired_username = ( + await ( + self.password_auth_provider.get_username_for_registration( + auth_result, + params, + ) ) ) @@ -682,9 +684,11 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: session_id ) - display_name = await ( - self.password_auth_provider.get_displayname_for_registration( - auth_result, params + display_name = ( + await ( + self.password_auth_provider.get_displayname_for_registration( + auth_result, params + ) ) ) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index 6e035afcce59..ef8334ae2586 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -270,7 +270,6 @@ async def respond_with_responder( logger.debug("Responding to media request with responder %s", responder) add_file_headers(request, media_type, file_size, upload_name) try: - await responder.write_to_consumer(request) except Exception as e: # The majority of the time this will be due to the client having gone diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 9480cc576332..f909a4fb9a19 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -38,7 +38,6 @@ class ThumbnailError(Exception): class Thumbnailer: - FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"} @staticmethod diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 8e61aba4548d..0d75d9739a70 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -721,8 +721,8 @@ def add_messages_txn( ], ) - for (user_id, messages_by_device) in edu["messages"].items(): - for (device_id, msg) in 
messages_by_device.items(): + for user_id, messages_by_device in edu["messages"].items(): + for device_id, msg in messages_by_device.items(): with start_active_span("store_outgoing_to_device_message"): set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["sender"]) set_tag(SynapseTags.TO_DEVICE_EDU_ID, edu["message_id"]) @@ -959,7 +959,6 @@ async def _remove_dead_devices_from_device_inbox( def _remove_dead_devices_from_device_inbox_txn( txn: LoggingTransaction, ) -> Tuple[int, bool]: - if "max_stream_id" in progress: max_stream_id = progress["max_stream_id"] else: diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 1ca66d57d40c..0dd15f16ff7b 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -512,7 +512,7 @@ async def get_device_updates_by_remote( results.append(("org.matrix.signing_key_update", result)) if issue_8631_logger.isEnabledFor(logging.DEBUG): - for (user_id, edu) in results: + for user_id, edu in results: issue_8631_logger.debug( "device update to %s for %s from %s to %s: %s", destination, @@ -1316,7 +1316,7 @@ def _prune_txn(txn: LoggingTransaction) -> None: ) """ count = 0 - for (destination, user_id, stream_id, device_id) in rows: + for destination, user_id, stream_id, device_id in rows: txn.execute( delete_sql, (destination, user_id, stream_id, stream_id, device_id) ) diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 6240f9a75ed3..9f8d2e4bea65 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -108,7 +108,7 @@ async def add_e2e_room_keys( raise StoreError(404, "No backup with that version exists") values = [] - for (room_id, session_id, room_key) in room_keys: + for room_id, session_id, room_key in room_keys: values.append( ( user_id, diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 2c2d1456666b..b9c39b171810 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -268,7 +268,7 @@ async def get_e2e_device_keys_and_signatures( ) # add each cross-signing signature to the correct device in the result dict. - for (user_id, key_id, device_id, signature) in cross_sigs_result: + for user_id, key_id, device_id, signature in cross_sigs_result: target_device_result = result[user_id][device_id] # We've only looked up cross-signatures for non-deleted devices with key # data. @@ -311,7 +311,7 @@ def _get_e2e_device_keys_txn( # devices. user_list = [] user_device_list = [] - for (user_id, device_id) in query_list: + for user_id, device_id in query_list: if device_id is None: user_list.append(user_id) else: @@ -353,7 +353,7 @@ def _get_e2e_device_keys_txn( txn.execute(sql, query_params) - for (user_id, device_id, display_name, key_json) in txn: + for user_id, device_id, display_name, key_json in txn: assert device_id is not None if include_deleted_devices: deleted_devices.remove((user_id, device_id)) @@ -382,7 +382,7 @@ def _get_e2e_cross_signing_signatures_for_devices_txn( signature_query_clauses = [] signature_query_params = [] - for (user_id, device_id) in device_query: + for user_id, device_id in device_query: signature_query_clauses.append( "target_user_id = ? AND target_device_id = ? AND user_id = ?" 
) diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index ca780cca36ec..ff3edeb7160b 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -1612,7 +1612,6 @@ def _get_missing_events( latest_events: List[str], limit: int, ) -> List[str]: - seen_events = set(earliest_events) front = set(latest_events) - seen_events event_results: List[str] = [] diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 7996cbb557d0..73b8aea16cc7 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -469,7 +469,6 @@ def _persist_event_auth_chain_txn( txn: LoggingTransaction, events: List[EventBase], ) -> None: - # We only care about state events, so this if there are no state events. if not any(e.is_state() for e in events): return diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 584536111daa..0a275e6ce665 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -709,7 +709,7 @@ def _event_store_labels_txn(txn: LoggingTransaction) -> int: nbrows = 0 last_row_event_id = "" - for (event_id, event_json_raw) in results: + for event_id, event_json_raw in results: try: event_json = db_to_json(event_json_raw) @@ -1167,7 +1167,7 @@ def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int: results = list(txn) # (event_id, parent_id, rel_type) for each relation relations_to_insert: List[Tuple[str, str, str]] = [] - for (event_id, event_json_raw) in results: + for event_id, event_json_raw in results: try: event_json = db_to_json(event_json_raw) except Exception as e: diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 6d0ef10258de..b7e74981256c 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -1493,7 +1493,7 @@ def _fetch_event_rows( txn.execute(redactions_sql + clause, args) - for (redacter, redacted) in txn: + for redacter, redacted in txn: d = event_dict.get(redacted) if d: d.redactions.append(redacter) diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index b202c5eb87a6..fa8be214ce90 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -196,7 +196,6 @@ async def get_local_media_by_user_paginate( def get_local_media_by_user_paginate_txn( txn: LoggingTransaction, ) -> Tuple[List[Dict[str, Any]], int]: - # Set ordering order_by_column = MediaSortOrder(order_by).value diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index df53e726e62a..fddbc07afa52 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -344,7 +344,6 @@ async def _remove_deactivated_pushers(self, progress: dict, batch_size: int) -> last_user = progress.get("last_user", "") def _delete_pushers(txn: LoggingTransaction) -> int: - sql = """ SELECT name FROM users WHERE deactivated = ? and name > ? 
@@ -392,7 +391,6 @@ async def _remove_stale_pushers(self, progress: dict, batch_size: int) -> int: last_pusher = progress.get("last_pusher", 0) def _delete_pushers(txn: LoggingTransaction) -> int: - sql = """ SELECT p.id, access_token FROM pushers AS p LEFT JOIN access_tokens AS a ON (p.access_token = a.id) @@ -449,7 +447,6 @@ async def _remove_deleted_email_pushers( last_pusher = progress.get("last_pusher", 0) def _delete_pushers(txn: LoggingTransaction) -> int: - sql = """ SELECT p.id, p.user_name, p.app_id, p.pushkey FROM pushers AS p diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index dddf49c2d575..92a82240ab4c 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -887,7 +887,6 @@ async def _populate_receipt_event_stream_ordering( def _populate_receipt_event_stream_ordering_txn( txn: LoggingTransaction, ) -> bool: - if "max_stream_id" in progress: max_stream_id = progress["max_stream_id"] else: diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 644bbb88788f..39f89291b2ba 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -2168,7 +2168,6 @@ async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: def _get_event_report_txn( txn: LoggingTransaction, report_id: int ) -> Optional[Dict[str, Any]]: - sql = """ SELECT er.id, diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index 3fe433f66ccc..a7aae661d860 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -122,7 +122,6 @@ def store_search_entries_txn( class SearchBackgroundUpdateStore(SearchWorkerStore): - EVENT_SEARCH_UPDATE_NAME = "event_search" EVENT_SEARCH_ORDER_UPDATE_NAME = "event_search_order" EVENT_SEARCH_USE_GIN_POSTGRES_NAME = "event_search_postgres_gin" @@ -615,7 +614,6 @@ async def search_rooms( """ count_args = [search_query] + count_args elif isinstance(self.database_engine, Sqlite3Engine): - # We use CROSS JOIN here to ensure we use the right indexes. 
# https://sqlite.org/optoverview.html#crossjoin # diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index ba325d390b58..ebb2ae964f5a 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -490,7 +490,6 @@ def _update_state_for_partial_state_event_txn( class MainStateBackgroundUpdateStore(RoomMemberWorkerStore): - CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" DELETE_CURRENT_STATE_UPDATE_NAME = "delete_old_current_state_events" diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index d7b7d0c3c909..d3393d8e493e 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -461,7 +461,7 @@ def _upsert_with_additive_relatives_txn( insert_cols = [] qargs = [] - for (key, val) in chain( + for key, val in chain( keyvalues.items(), absolutes.items(), additive_relatives.items() ): insert_cols.append(key) diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 818c46182e01..ac5fbf6b8621 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -87,6 +87,7 @@ _STREAM_TOKEN = "stream" _TOPOLOGICAL_TOKEN = "topological" + # Used as return values for pagination APIs @attr.s(slots=True, frozen=True, auto_attribs=True) class _EventDictReturn: diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 6b33d809b6cf..6d72bd9f67f2 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -573,7 +573,6 @@ async def get_destination_rooms_paginate( def get_destination_rooms_paginate_txn( txn: LoggingTransaction, ) -> Tuple[List[JsonDict], int]: - if direction == Direction.BACKWARDS: order = "DESC" else: diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 30af4b3b6cfe..c3f2b61bd543 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -98,7 +98,6 @@ def __init__( async def _populate_user_directory_createtables( self, progress: JsonDict, batch_size: int ) -> int: - # Get all the rooms that we want to process. 
def _make_staging_area(txn: LoggingTransaction) -> None: sql = ( diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index d743282f13a3..097dea51828c 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -251,7 +251,6 @@ def _get_state_groups_from_groups_txn( class StateBackgroundUpdateStore(StateGroupBackgroundUpdateStore): - STATE_GROUP_DEDUPLICATION_UPDATE_NAME = "state_group_state_deduplication" STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 1a7232b27665..89b1faa6c834 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -257,14 +257,11 @@ async def _get_state_for_groups( member_filter, non_member_filter = state_filter.get_member_split() # Now we look them up in the member and non-member caches - ( - non_member_state, - incomplete_groups_nm, - ) = self._get_state_for_groups_using_cache( + non_member_state, incomplete_groups_nm = self._get_state_for_groups_using_cache( groups, self._state_group_cache, state_filter=non_member_filter ) - (member_state, incomplete_groups_m,) = self._get_state_for_groups_using_cache( + member_state, incomplete_groups_m = self._get_state_for_groups_using_cache( groups, self._state_group_members_cache, state_filter=member_filter ) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 6c335a931501..2a1c6fa31bc2 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -563,7 +563,7 @@ def _apply_module_schemas( """ # This is the old way for password_auth_provider modules to make changes # to the database. 
This should instead be done using the module API - for (mod, _config) in config.authproviders.password_providers: + for mod, _config in config.authproviders.password_providers: if not hasattr(mod, "get_db_schema_files"): continue modname = ".".join((mod.__module__, mod.__name__)) @@ -591,7 +591,7 @@ def _apply_module_schema_files( (modname,), ) applied_deltas = {d for d, in cur} - for (name, stream) in names_and_streams: + for name, stream in names_and_streams: if name in applied_deltas: continue diff --git a/synapse/types/state.py b/synapse/types/state.py index 743a4f9217ee..4b3071acce32 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -120,7 +120,7 @@ def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter": def to_types(self) -> Iterable[Tuple[str, Optional[str]]]: """The inverse to `from_types`.""" - for (event_type, state_keys) in self.types.items(): + for event_type, state_keys in self.types.items(): if state_keys is None: yield event_type, None else: diff --git a/synapse/util/caches/__init__.py b/synapse/util/caches/__init__.py index 9387632d0d74..6ffa56217eb9 100644 --- a/synapse/util/caches/__init__.py +++ b/synapse/util/caches/__init__.py @@ -98,7 +98,6 @@ class EvictionReason(Enum): @attr.s(slots=True, auto_attribs=True) class CacheMetric: - _cache: Sized _cache_type: str _cache_name: str diff --git a/synapse/util/check_dependencies.py b/synapse/util/check_dependencies.py index 3b1e20570020..1c0fde4966e7 100644 --- a/synapse/util/check_dependencies.py +++ b/synapse/util/check_dependencies.py @@ -183,7 +183,7 @@ def check_requirements(extra: Optional[str] = None) -> None: deps_unfulfilled = [] errors = [] - for (requirement, must_be_installed) in dependencies: + for requirement, must_be_installed in dependencies: try: dist: metadata.Distribution = metadata.distribution(requirement.name) except metadata.PackageNotFoundError: diff --git a/synapse/util/patch_inline_callbacks.py b/synapse/util/patch_inline_callbacks.py index f97f98a05742..d00d34e652a0 100644 --- a/synapse/util/patch_inline_callbacks.py +++ b/synapse/util/patch_inline_callbacks.py @@ -211,7 +211,6 @@ def check_yield_points_inner( result = Failure() if current_context() != expected_context: - # This happens because the context is lost sometime *after* the # previous yield and *after* the current yield. E.g. 
the # deferred we waited on didn't follow the rules, or we forgot to diff --git a/synmark/__main__.py b/synmark/__main__.py index 35a59e347a46..19de6391877f 100644 --- a/synmark/__main__.py +++ b/synmark/__main__.py @@ -34,12 +34,10 @@ def make_test(main): """ def _main(loops): - reactor = make_reactor() file_out = StringIO() with redirect_stderr(file_out): - d = Deferred() d.addCallback(lambda _: ensureDeferred(main(reactor, loops))) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index 9419892e957c..8beb077e0a33 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -30,7 +30,6 @@ class LineCounter(LineOnlyReceiver): - delimiter = b"\n" def __init__(self, *args, **kwargs): diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 35dd9a20df2c..33af8770fde8 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -24,7 +24,6 @@ class RoomComplexityTests(unittest.FederatingHomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, @@ -37,7 +36,6 @@ def default_config(self) -> JsonDict: return config def test_complexity_simple(self) -> None: - u1 = self.register_user("u1", "pass") u1_token = self.login("u1", "pass") @@ -71,7 +69,6 @@ async def get_current_state_event_counts(room_id: str) -> int: self.assertEqual(complexity, 1.23) def test_join_too_large(self) -> None: - u1 = self.register_user("u1", "pass") handler = self.hs.get_room_member_handler() @@ -131,7 +128,6 @@ def test_join_too_large_admin(self) -> None: self.assertEqual(f.value.errcode, Codes.RESOURCE_LIMIT_EXCEEDED) def test_join_too_large_once_joined(self) -> None: - u1 = self.register_user("u1", "pass") u1_token = self.login("u1", "pass") diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index bba6469b559b..6c7738d81020 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -34,7 +34,6 @@ class FederationServerTests(unittest.FederatingHomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py index 137deab138b5..d6f43a98fcda 100644 --- a/tests/handlers/test_sso.py +++ b/tests/handlers/test_sso.py @@ -113,7 +113,6 @@ async def mock_get_file( headers: Optional[RawHeaders] = None, is_allowed_content_type: Optional[Callable[[str], bool]] = None, ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: - fake_response = FakeResponse(code=404) if url == "http://my.server/me.png": fake_response = FakeResponse( diff --git a/tests/handlers/test_stats.py b/tests/handlers/test_stats.py index f1a50c5bcb26..d11ded6c5b63 100644 --- a/tests/handlers/test_stats.py +++ b/tests/handlers/test_stats.py @@ -31,7 +31,6 @@ class StatsRoomTests(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/http/federation/test_srv_resolver.py b/tests/http/federation/test_srv_resolver.py index 7748f56ee6e2..6ab13357f942 100644 --- a/tests/http/federation/test_srv_resolver.py +++ b/tests/http/federation/test_srv_resolver.py @@ -46,7 +46,6 @@ def test_resolve(self) -> None: @defer.inlineCallbacks def do_lookup() -> Generator["Deferred[object]", object, List[Server]]: - with LoggingContext("one") as ctx: resolve_d = resolver.resolve_service(service_name) result: List[Server] diff --git a/tests/http/test_client.py b/tests/http/test_client.py index 
9cfe1ad0de0a..f6d668498589 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -149,7 +149,7 @@ def setUp(self) -> None: self.allowed_domain, self.allowed_ip = b"allowed.test", b"5.1.1.1" # Configure the reactor's DNS resolver. - for (domain, ip) in ( + for domain, ip in ( (self.safe_domain, self.safe_ip), (self.unsafe_domain, self.unsafe_ip), (self.allowed_domain, self.allowed_ip), diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 199e3d7b7007..dce6899e787b 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -33,7 +33,6 @@ class TestBulkPushRuleEvaluator(HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, room.register_servlets, diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 7563f33fdc78..0a3aca5c5076 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -39,7 +39,6 @@ class _User: class EmailPusherTests(HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -48,7 +47,6 @@ class EmailPusherTests(HomeserverTestCase): hijack_auth = False def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["email"] = { "enable_notifs": True, diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index ddca9d696c75..57c781a0c3fd 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -64,7 +64,6 @@ def unpatch() -> None: class EventsWorkerStoreTestCase(BaseSlavedStoreTestCase): - STORE_TYPE = EventsWorkerStore def setUp(self) -> None: diff --git a/tests/rest/admin/test_device.py b/tests/rest/admin/test_device.py index 03f2112b07ce..aaa488bced1e 100644 --- a/tests/rest/admin/test_device.py +++ b/tests/rest/admin/test_device.py @@ -28,7 +28,6 @@ class DeviceRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -291,7 +290,6 @@ def test_delete_device(self) -> None: class DevicesRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, @@ -415,7 +413,6 @@ def test_get_devices(self) -> None: class DeleteDevicesRestTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index db77a45ae39f..f41319a5b66f 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -34,7 +34,6 @@ class DeleteMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -196,7 +195,6 @@ def test_delete_media(self) -> None: class DeleteMediaByDateSizeTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -594,7 +592,6 @@ def _access_media( class QuarantineMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, @@ -724,7 +721,6 @@ def test_quarantine_protected_media(self) -> None: class ProtectMediaByIDTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, 
synapse.rest.admin.register_servlets_for_media_repo, @@ -821,7 +817,6 @@ def test_protect_media(self) -> None: class PurgeMediaCacheTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 453a6e979c02..9dbb778679d1 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -1990,7 +1990,6 @@ def test_room_messages_purge(self) -> None: class JoinAliasRoomTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, room.register_servlets, diff --git a/tests/rest/admin/test_server_notice.py b/tests/rest/admin/test_server_notice.py index f71ff46d8777..28b999573e75 100644 --- a/tests/rest/admin/test_server_notice.py +++ b/tests/rest/admin/test_server_notice.py @@ -28,7 +28,6 @@ class ServerNoticeTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index e2ee1a1766cd..2b05dffc7d22 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -40,7 +40,6 @@ class PasswordResetTestCase(unittest.HomeserverTestCase): - servlets = [ account.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -408,7 +407,6 @@ def _reset_password( class DeactivateTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -492,7 +490,6 @@ def deactivate(self, user_id: str, tok: str) -> None: class WhoamiTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -567,7 +564,6 @@ def _whoami(self, tok: str) -> JsonDict: class ThreepidEmailRestTestCase(unittest.HomeserverTestCase): - servlets = [ account.register_servlets, login.register_servlets, diff --git a/tests/rest/client/test_auth.py b/tests/rest/client/test_auth.py index a1446100780c..0d8fe77b884f 100644 --- a/tests/rest/client/test_auth.py +++ b/tests/rest/client/test_auth.py @@ -52,7 +52,6 @@ def check_auth(self, authdict: dict, clientip: str) -> Any: class FallbackAuthTests(unittest.HomeserverTestCase): - servlets = [ auth.register_servlets, register.register_servlets, @@ -60,7 +59,6 @@ class FallbackAuthTests(unittest.HomeserverTestCase): hijack_auth = False def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["enable_registration_captcha"] = True diff --git a/tests/rest/client/test_capabilities.py b/tests/rest/client/test_capabilities.py index d1751e1557f7..c16e8d43f419 100644 --- a/tests/rest/client/test_capabilities.py +++ b/tests/rest/client/test_capabilities.py @@ -26,7 +26,6 @@ class CapabilitiesTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, capabilities.register_servlets, diff --git a/tests/rest/client/test_consent.py b/tests/rest/client/test_consent.py index b1ca81a91191..bb845179d3f8 100644 --- a/tests/rest/client/test_consent.py +++ b/tests/rest/client/test_consent.py @@ -38,7 +38,6 @@ class ConsentResourceTestCase(unittest.HomeserverTestCase): hijack_auth = False def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["form_secret"] = "123abc" diff --git 
a/tests/rest/client/test_directory.py b/tests/rest/client/test_directory.py index 7a88aa2cda7f..6490e883bf28 100644 --- a/tests/rest/client/test_directory.py +++ b/tests/rest/client/test_directory.py @@ -28,7 +28,6 @@ class DirectoryTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, directory.register_servlets, diff --git a/tests/rest/client/test_ephemeral_message.py b/tests/rest/client/test_ephemeral_message.py index 9fa1f82dfee0..f31ebc8021bb 100644 --- a/tests/rest/client/test_ephemeral_message.py +++ b/tests/rest/client/test_ephemeral_message.py @@ -26,7 +26,6 @@ class EphemeralMessageTestCase(unittest.HomeserverTestCase): - user_id = "@user:test" servlets = [ diff --git a/tests/rest/client/test_events.py b/tests/rest/client/test_events.py index a9b7db9db227..54df2a252c01 100644 --- a/tests/rest/client/test_events.py +++ b/tests/rest/client/test_events.py @@ -38,7 +38,6 @@ class EventStreamPermissionsTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["enable_registration_captcha"] = False config["enable_registration"] = True @@ -51,7 +50,6 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # register an account self.user_id = self.register_user("sid1", "pass") self.token = self.login(self.user_id, "pass") @@ -142,7 +140,6 @@ class GetEventsTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - # register an account self.user_id = self.register_user("sid1", "pass") self.token = self.login(self.user_id, "pass") diff --git a/tests/rest/client/test_filter.py b/tests/rest/client/test_filter.py index 830762fd5358..91678abf1311 100644 --- a/tests/rest/client/test_filter.py +++ b/tests/rest/client/test_filter.py @@ -25,7 +25,6 @@ class FilterTestCase(unittest.HomeserverTestCase): - user_id = "@apple:test" hijack_auth = True EXAMPLE_FILTER = {"room": {"timeline": {"types": ["m.room.message"]}}} diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index ff5baa9f0a78..62acf4f44ed0 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -89,7 +89,6 @@ class LoginRestServletTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -737,7 +736,6 @@ def _get_value_from_macaroon(macaroon: pymacaroons.Macaroon, key: str) -> str: class CASTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, ] diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py index 6aedc1a11c9e..b8187db982bf 100644 --- a/tests/rest/client/test_login_token_request.py +++ b/tests/rest/client/test_login_token_request.py @@ -26,7 +26,6 @@ class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, admin.register_servlets, diff --git a/tests/rest/client/test_presence.py b/tests/rest/client/test_presence.py index 67e16880e60b..dcbb125a3bba 100644 --- a/tests/rest/client/test_presence.py +++ b/tests/rest/client/test_presence.py @@ -35,7 +35,6 @@ class PresenceTestCase(unittest.HomeserverTestCase): servlets = [presence.register_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - 
self.presence_handler = Mock(spec=PresenceHandler) self.presence_handler.set_state.return_value = make_awaitable(None) diff --git a/tests/rest/client/test_profile.py b/tests/rest/client/test_profile.py index 8de5a342aeef..27c93ad76122 100644 --- a/tests/rest/client/test_profile.py +++ b/tests/rest/client/test_profile.py @@ -30,7 +30,6 @@ class ProfileTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -324,7 +323,6 @@ def _setup_local_files(self, names_and_props: Dict[str, Dict[str, Any]]) -> None class ProfilesRestrictedTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, login.register_servlets, @@ -404,7 +402,6 @@ def ensure_requester_left_room(self) -> None: class OwnProfileUnrestrictedTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets_for_client_rest_resource, login.register_servlets, diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index 4c561f9525b9..b228dba8613d 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -40,7 +40,6 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, register.register_servlets, @@ -797,7 +796,6 @@ def test_require_approval(self) -> None: class AccountValidityTestCase(unittest.HomeserverTestCase): - servlets = [ register.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -913,7 +911,6 @@ def test_logging_out_expired_user(self) -> None: class AccountValidityRenewalByEmailTestCase(unittest.HomeserverTestCase): - servlets = [ register.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -1132,7 +1129,6 @@ def test_manual_email_send_expired_account(self) -> None: class AccountValidityBackgroundJobTestCase(unittest.HomeserverTestCase): - servlets = [synapse.rest.admin.register_servlets_for_client_rest_resource] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py index c0eb5d01a65b..8dbd64be552b 100644 --- a/tests/rest/client/test_rendezvous.py +++ b/tests/rest/client/test_rendezvous.py @@ -25,7 +25,6 @@ class RendezvousServletTestCase(unittest.HomeserverTestCase): - servlets = [ rendezvous.register_servlets, ] diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index cfad182b2f1a..4dd763096d3a 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -65,7 +65,6 @@ class RoomBase(unittest.HomeserverTestCase): servlets = [room.register_servlets, room.register_deprecated_servlets] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.hs = self.setup_test_homeserver( "red", federation_http_client=None, @@ -92,7 +91,6 @@ class RoomPermissionsTestCase(RoomBase): rmcreator_id = "@notme:red" def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.helper.auth_user_id = self.rmcreator_id # create some rooms under the name rmcreator_id self.uncreated_rmid = "!aa:test" @@ -1127,7 +1125,6 @@ def test_invites_by_users_ratelimit(self) -> None: class RoomJoinTestCase(RoomBase): - servlets = [ admin.register_servlets, login.register_servlets, @@ -2102,7 +2099,6 @@ class RoomSearchTestCase(unittest.HomeserverTestCase): hijack_auth = False def prepare(self, reactor: MemoryReactor, clock: Clock, hs: 
HomeServer) -> None: - # Register the user who does the searching self.user_id2 = self.register_user("user", "pass") self.access_token = self.login("user", "pass") @@ -2195,7 +2191,6 @@ def test_include_context(self) -> None: class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -2203,7 +2198,6 @@ class PublicRoomsRestrictedTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.url = b"/_matrix/client/r0/publicRooms" config = self.default_config() @@ -2225,7 +2219,6 @@ def test_restricted_auth(self) -> None: class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -2233,7 +2226,6 @@ class PublicRoomsRoomTypeFilterTestCase(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["allow_public_rooms_without_auth"] = True self.hs = self.setup_test_homeserver(config=config) @@ -2414,7 +2406,6 @@ def test_fallback(self) -> None: class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -2983,7 +2974,6 @@ def _filter_messages(self, filter: JsonDict) -> List[str]: class ContextTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -3359,7 +3349,6 @@ def test_bad_alias(self) -> None: class ThreepidInviteTestCase(unittest.HomeserverTestCase): - servlets = [ admin.register_servlets, login.register_servlets, @@ -3438,7 +3427,8 @@ def test_threepid_invite_spamcheck(self) -> None: """ Test allowing/blocking threepid invites with a spam-check module. - In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`.""" + In this test, we use the more recent API in which callbacks return a `Union[Codes, Literal["NOT_SPAM"]]`. + """ # Mock a few functions to prevent the test from failing due to failing to talk to # a remote IS. We keep the mock for make_and_store_3pid_invite around so we # can check its call_count later on during the test. 
diff --git a/tests/rest/client/test_sync.py b/tests/rest/client/test_sync.py index b9047194dd63..9c876c7a3230 100644 --- a/tests/rest/client/test_sync.py +++ b/tests/rest/client/test_sync.py @@ -41,7 +41,6 @@ class FilterTestCase(unittest.HomeserverTestCase): - user_id = "@apple:test" servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -191,7 +190,6 @@ def _test_sync_filter_labels(self, sync_filter: str) -> List[JsonDict]: class SyncTypingTests(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, room.register_servlets, @@ -892,7 +890,6 @@ def test_user_with_no_rooms_receives_self_device_list_updates(self) -> None: class ExcludeRoomTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 5fa3440691e8..c0f93f898a79 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -137,6 +137,7 @@ def test_third_party_rules(self) -> None: """Tests that a forbidden event is forbidden from being sent, but an allowed one can be sent. """ + # patch the rules module with a Mock which will return False for some event # types async def check( @@ -243,6 +244,7 @@ async def check( def test_modify_event(self) -> None: """The module can return a modified version of the event""" + # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] @@ -275,6 +277,7 @@ async def check( def test_message_edit(self) -> None: """Ensure that the module doesn't cause issues with edited messages.""" + # first patch the event checker so that it will modify the event async def check( ev: EventBase, state: StateMap[EventBase] diff --git a/tests/rest/media/test_media_retention.py b/tests/rest/media/test_media_retention.py index 23f227aed69f..b59d9dfd4da0 100644 --- a/tests/rest/media/test_media_retention.py +++ b/tests/rest/media/test_media_retention.py @@ -31,7 +31,6 @@ class MediaRetentionTestCase(unittest.HomeserverTestCase): - ONE_DAY_IN_MS = 24 * 60 * 60 * 1000 THIRTY_DAYS_IN_MS = 30 * ONE_DAY_IN_MS diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/rest/media/v1/test_media_storage.py index 17a3b06a8e25..8ed27179c46a 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/rest/media/v1/test_media_storage.py @@ -52,7 +52,6 @@ class MediaStorageTests(unittest.HomeserverTestCase): - needs_threadpool = True def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -207,7 +206,6 @@ class MediaRepoTests(unittest.HomeserverTestCase): user_id = "@test:user" def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - self.fetches: List[ Tuple[ "Deferred[Tuple[bytes, Tuple[int, Dict[bytes, List[bytes]]]]]", @@ -268,7 +266,6 @@ def write_to( return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - media_resource = hs.get_media_repository_resource() self.download_resource = media_resource.children[b"download"] self.thumbnail_resource = media_resource.children[b"thumbnail"] diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 2c321f8d04fc..6fcf60ce1904 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -58,7 +58,6 @@ class URLPreviewTests(unittest.HomeserverTestCase): ) def 
make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - config = self.default_config() config["url_preview_enabled"] = True config["max_spider_size"] = 9999999 @@ -118,7 +117,6 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.media_repo = hs.get_media_repository_resource() self.preview_url = self.media_repo.children[b"preview_url"] @@ -133,7 +131,6 @@ def resolveHostName( addressTypes: Optional[Sequence[Type[IAddress]]] = None, transportSemantics: str = "TCP", ) -> IResolutionReceiver: - resolution = HostResolution(hostName) resolutionReceiver.resolutionBegan(resolution) if hostName not in self.lookups: diff --git a/tests/server_notices/test_consent.py b/tests/server_notices/test_consent.py index 6540ed53f173..3fdf5a6d5207 100644 --- a/tests/server_notices/test_consent.py +++ b/tests/server_notices/test_consent.py @@ -25,7 +25,6 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): - servlets = [ sync.register_servlets, synapse.rest.admin.register_servlets_for_client_rest_resource, @@ -34,7 +33,6 @@ class ConsentNoticesTests(unittest.HomeserverTestCase): ] def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - tmpdir = self.mktemp() os.mkdir(tmpdir) self.consent_notice_message = "consent %(consent_uri)s" diff --git a/tests/storage/databases/main/test_deviceinbox.py b/tests/storage/databases/main/test_deviceinbox.py index 373707b275c7..b6d5c474b0bd 100644 --- a/tests/storage/databases/main/test_deviceinbox.py +++ b/tests/storage/databases/main/test_deviceinbox.py @@ -23,7 +23,6 @@ class DeviceInboxBackgroundUpdateStoreTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, devices.register_servlets, diff --git a/tests/storage/databases/main/test_receipts.py b/tests/storage/databases/main/test_receipts.py index ac77aec003b1..71db47405ef5 100644 --- a/tests/storage/databases/main/test_receipts.py +++ b/tests/storage/databases/main/test_receipts.py @@ -26,7 +26,6 @@ class ReceiptsBackgroundUpdateStoreTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, @@ -62,6 +61,7 @@ def _test_background_receipts_unique_index( keys and expected receipt key-values after duplicate receipts have been removed. """ + # First, undo the background update. 
def drop_receipts_unique_index(txn: LoggingTransaction) -> None: txn.execute(f"DROP INDEX IF EXISTS {index_name}") diff --git a/tests/storage/databases/main/test_room.py b/tests/storage/databases/main/test_room.py index 3108ca344423..dbd8f3a85eea 100644 --- a/tests/storage/databases/main/test_room.py +++ b/tests/storage/databases/main/test_room.py @@ -27,7 +27,6 @@ class RoomBackgroundUpdateStoreTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, diff --git a/tests/storage/test_client_ips.py b/tests/storage/test_client_ips.py index 7f7f4ef892bd..cd0079871ca2 100644 --- a/tests/storage/test_client_ips.py +++ b/tests/storage/test_client_ips.py @@ -656,7 +656,6 @@ def test_old_user_ips_pruned(self) -> None: class ClientIpAuthTestCase(unittest.HomeserverTestCase): - servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index a10e5fa8b147..73d11e7786fb 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -417,7 +417,6 @@ def _persist(txn: LoggingTransaction) -> None: def fetch_chains( self, events: List[EventBase] ) -> Tuple[Dict[str, Tuple[int, int]], _LinkMap]: - # Fetch the map from event ID -> (chain ID, sequence number) rows = self.get_success( self.store.db_pool.simple_select_many_batch( @@ -492,7 +491,6 @@ def test_simple(self) -> None: class EventChainBackgroundUpdateTestCase(HomeserverTestCase): - servlets = [ admin.register_servlets, room.register_servlets, diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 8fc7936ab094..3e1984c15cf0 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -672,7 +672,7 @@ def _setup_room_for_backfill_tests(self) -> _BackfillSetupInfo: complete_event_dict_map: Dict[str, JsonDict] = {} stream_ordering = 0 - for (event_id, prev_event_ids) in event_graph.items(): + for event_id, prev_event_ids in event_graph.items(): depth = depth_map[event_id] complete_event_dict_map[event_id] = { diff --git a/tests/storage/test_event_push_actions.py b/tests/storage/test_event_push_actions.py index 76c06a9d1e72..aa19c3bd30d5 100644 --- a/tests/storage/test_event_push_actions.py +++ b/tests/storage/test_event_push_actions.py @@ -774,7 +774,7 @@ def add_event(so: int, ts: int) -> None: self.assertEqual(r, 3) # add a bunch of dummy events to the events table - for (stream_ordering, ts) in ( + for stream_ordering, ts in ( (3, 110), (4, 120), (5, 120), diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index d8f42c5d0575..857e2caf2e24 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -23,7 +23,6 @@ class PurgeTests(HomeserverTestCase): - user_id = "@red:server" servlets = [room.register_servlets] diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 8794401823e1..f4c4661aafd8 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -27,7 +27,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): - servlets = [ login.register_servlets, register_servlets_for_client_rest_resource, @@ -35,7 +34,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: TestHomeServer) -> None: # type: ignore[override] - # We can't test the RoomMemberStore on its own without the other event # storage logic self.store = 
hs.get_datastores().main @@ -48,7 +46,6 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: TestHomeServer) -> N self.u_charlie = UserID.from_string("@charlie:elsewhere") def test_one_member(self) -> None: - # Alice creates the room, and is automatically joined self.room = self.helper.create_room_as(self.u_alice, tok=self.t_alice) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index f730b888f7d2..e82c03f5970b 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -242,7 +242,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters out members # with types=[] - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -259,7 +259,7 @@ def test_get_state_for_event(self) -> None: state_dict, ) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -272,7 +272,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with wildcard types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -289,7 +289,7 @@ def test_get_state_for_event(self) -> None: state_dict, ) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -309,7 +309,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -327,7 +327,7 @@ def test_get_state_for_event(self) -> None: state_dict, ) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -341,7 +341,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -392,7 +392,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters out members # with types=[] room_id = self.room.to_string() - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -404,7 +404,7 @@ def test_get_state_for_event(self) -> None: self.assertDictEqual({}, state_dict) 
room_id = self.room.to_string() - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -417,7 +417,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # wildcard types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -428,7 +428,7 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -447,7 +447,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -459,7 +459,7 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( @@ -473,7 +473,7 @@ def test_get_state_for_event(self) -> None: # test _get_state_for_group_using_cache correctly filters in members # with specific types - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_cache, group, state_filter=StateFilter( @@ -485,7 +485,7 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, False) self.assertDictEqual({}, state_dict) - (state_dict, is_all,) = self.state_datastore._get_state_for_group_using_cache( + state_dict, is_all = self.state_datastore._get_state_for_group_using_cache( self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( diff --git a/tests/test_mau.py b/tests/test_mau.py index 4e7665a22b93..ff21098a5965 100644 --- a/tests/test_mau.py +++ b/tests/test_mau.py @@ -32,7 +32,6 @@ class TestMauLimit(unittest.HomeserverTestCase): - servlets = [register.register_servlets, sync.register_servlets] def default_config(self) -> JsonDict: From adac949a417d064958039ae0918b97388413c824 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 22 Feb 2023 15:30:41 -0500 Subject: [PATCH 227/344] Update .git-blame-ignore-revs for #15103. --- .git-blame-ignore-revs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index c3638c35eb14..839b895c82ca 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -21,4 +21,8 @@ aff1eb7c671b0a3813407321d2702ec46c71fa56 0a00b7ff14890987f09112a2ae696c61001e6cf1 # Convert tests/rest/admin/test_room.py to unix file endings (#7953). 
-c4268e3da64f1abb5b31deaeb5769adb6510c0a7 \ No newline at end of file +c4268e3da64f1abb5b31deaeb5769adb6510c0a7 + +# Update black to 23.1.0 (#15103) +9bb2eac71962970d02842bca441f4bcdbbf93a11 + From 452b009eb085387cec0b967a114e722624d276fb Mon Sep 17 00:00:00 2001 From: Fly <3713548+flyinghuman@users.noreply.github.com> Date: Thu, 23 Feb 2023 18:54:03 +0100 Subject: [PATCH 228/344] Documentation using Shibboleth with OIDC Plugin for SSO. (#15112) --- changelog.d/15112.doc | 1 + docs/openid.md | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) create mode 100644 changelog.d/15112.doc diff --git a/changelog.d/15112.doc b/changelog.d/15112.doc new file mode 100644 index 000000000000..7dec43a50b33 --- /dev/null +++ b/changelog.d/15112.doc @@ -0,0 +1 @@ +Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. diff --git a/docs/openid.md b/docs/openid.md index 6ee8c83ec0a2..73f1e061210c 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -590,6 +590,47 @@ oidc_providers: Note that the fields `client_id` and `client_secret` are taken from the CURL response above. +### Shibboleth with OIDC Plugin + +[Shibboleth](https://www.shibboleth.net/) is an open Standard IdP solution widely used by Universities. + +1. Shibboleth needs the [OIDC Plugin](https://shibboleth.atlassian.net/wiki/spaces/IDPPLUGINS/pages/1376878976/OIDC+OP) installed and working correctly. +2. Create a new config on the IdP Side, ensure that the `client_id` and `client_secret` + are randomly generated data. +```json +{ + "client_id": "SOME-CLIENT-ID", + "client_secret": "SOME-SUPER-SECRET-SECRET", + "response_types": ["code"], + "grant_types": ["authorization_code"], + "scope": "openid profile email", + "redirect_uris": ["https://[synapse public baseurl]/_synapse/client/oidc/callback"] +} +``` + +Synapse config: + +```yaml +oidc_providers: + # Shibboleth IDP + # + - idp_id: shibboleth + idp_name: "Shibboleth Login" + discover: true + issuer: "https://YOUR-IDP-URL.TLD" + client_id: "YOUR_CLIENT_ID" + client_secret: "YOUR-CLIENT-SECRECT-FROM-YOUR-IDP" + scopes: ["openid", "profile", "email"] + allow_existing_users: true + user_profile_method: "userinfo_endpoint" + user_mapping_provider: + config: + subject_claim: "sub" + localpart_template: "{{ user.sub.split('@')[0] }}" + display_name_template: "{{ user.name }}" + email_template: "{{ user.email }}" +``` + ### Twitch 1. Setup a developer account on [Twitch](https://dev.twitch.tv/) From a068ad7dd4910c81bb0886fbf986dde126eeb4ee Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 23 Feb 2023 19:14:17 +0100 Subject: [PATCH 229/344] Add information on uploaded media to user export command. (#15107) --- changelog.d/15107.feature | 1 + docs/usage/administration/admin_faq.md | 74 ++++++++++++++++++++------ synapse/app/admin_cmd.py | 10 ++++ synapse/handlers/admin.py | 38 +++++++++++++ tests/handlers/test_admin.py | 29 ++++++++++ 5 files changed, 136 insertions(+), 16 deletions(-) create mode 100644 changelog.d/15107.feature diff --git a/changelog.d/15107.feature b/changelog.d/15107.feature new file mode 100644 index 000000000000..2bdb6a29fceb --- /dev/null +++ b/changelog.d/15107.feature @@ -0,0 +1 @@ +Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). 
\ No newline at end of file diff --git a/docs/usage/administration/admin_faq.md b/docs/usage/administration/admin_faq.md index 925e1d175e64..28c3dd53a5f4 100644 --- a/docs/usage/administration/admin_faq.md +++ b/docs/usage/administration/admin_faq.md @@ -70,13 +70,55 @@ output-directory │ ├───state │ ├───invite_state │ └───knock_state -└───user_data - ├───account_data - │ ├───global - │ └─── - ├───connections - ├───devices - └───profile +├───user_data +│ ├───account_data +│ │ ├───global +│ │ └─── +│ ├───connections +│ ├───devices +│ └───profile +└───media_ids + └─── +``` + +The `media_ids` folder contains only the metadata of the media uploaded by the user. +It does not contain the media itself. +Furthermore, only the `media_ids` that Synapse manages itself are exported. +If another media repository (e.g. [matrix-media-repo](https://github.com/turt2live/matrix-media-repo)) +is used, the data must be exported separately. + +With the `media_ids` the media files can be downloaded. +Media that have been sent in encrypted rooms are only retrieved in encrypted form. +The following script can help with download the media files: + +```bash +#!/usr/bin/env bash + +# Parameters +# +# source_directory: Directory which contains the export with the media_ids. +# target_directory: Directory into which all files are to be downloaded. +# repository_url: Address of the media repository resp. media worker. +# serverName: Name of the server (`server_name` from homeserver.yaml). +# +# Example: +# ./download_media.sh /tmp/export_data/media_ids/ /tmp/export_data/media_files/ http://localhost:8008 matrix.example.com + +source_directory=$1 +target_directory=$2 +repository_url=$3 +serverName=$4 + +mkdir -p $target_directory + +for file in $source_directory/*; do + filename=$(basename ${file}) + url=$repository_url/_matrix/media/v3/download/$serverName/$filename + echo "Downloading $filename - $url" + if ! wget -o /dev/null -P $target_directory $url; then + echo "Could not download $filename" + fi +done ``` Manually resetting passwords @@ -87,7 +129,7 @@ can reset a user's password using the [admin API](../../admin_api/user_admin_api I have a problem with my server. Can I just delete my database and start again? --- -Deleting your database is unlikely to make anything better. +Deleting your database is unlikely to make anything better. It's easy to make the mistake of thinking that you can start again from a clean slate by dropping your database, but things don't work like that in a federated @@ -102,7 +144,7 @@ Come and seek help in https://matrix.to/#/#synapse:matrix.org. There are two exceptions when it might be sensible to delete your database and start again: * You have *never* joined any rooms which are federated with other servers. For -instance, a local deployment which the outside world can't talk to. +instance, a local deployment which the outside world can't talk to. * You are changing the `server_name` in the homeserver configuration. In effect this makes your server a completely new one from the point of view of the network, so in this case it makes sense to start with a clean database. @@ -115,7 +157,7 @@ Using the following curl command: curl -H 'Authorization: Bearer ' -X DELETE https://matrix.org/_matrix/client/r0/directory/room/ ``` `` - can be obtained in riot by looking in the riot settings, down the bottom is: -Access Token:\ +Access Token:\ `` - the room alias, eg. 
#my_room:matrix.org this possibly needs to be URL encoded also, for example %23my_room%3Amatrix.org @@ -152,13 +194,13 @@ What are the biggest rooms on my server? --- ```sql -SELECT s.canonical_alias, g.room_id, count(*) AS num_rows -FROM - state_groups_state AS g, - room_stats_state AS s -WHERE g.room_id = s.room_id +SELECT s.canonical_alias, g.room_id, count(*) AS num_rows +FROM + state_groups_state AS g, + room_stats_state AS s +WHERE g.room_id = s.room_id GROUP BY s.canonical_alias, g.room_id -ORDER BY num_rows desc +ORDER BY num_rows desc LIMIT 10; ``` diff --git a/synapse/app/admin_cmd.py b/synapse/app/admin_cmd.py index 5003777f0de9..b05fe2c589c4 100644 --- a/synapse/app/admin_cmd.py +++ b/synapse/app/admin_cmd.py @@ -44,6 +44,7 @@ ) from synapse.storage.databases.main.events_worker import EventsWorkerStore from synapse.storage.databases.main.filtering import FilteringWorkerStore +from synapse.storage.databases.main.media_repository import MediaRepositoryStore from synapse.storage.databases.main.profile import ProfileWorkerStore from synapse.storage.databases.main.push_rule import PushRulesWorkerStore from synapse.storage.databases.main.receipts import ReceiptsWorkerStore @@ -86,6 +87,7 @@ class AdminCmdSlavedStore( RegistrationWorkerStore, RoomWorkerStore, ProfileWorkerStore, + MediaRepositoryStore, ): def __init__( self, @@ -235,6 +237,14 @@ def write_account_data( with open(account_data_file, "a") as f: json.dump(account_data, fp=f) + def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + file_directory = os.path.join(self.base_directory, "media_ids") + os.makedirs(file_directory, exist_ok=True) + media_id_file = os.path.join(file_directory, media_id) + + with open(media_id_file, "w") as f: + json.dump(media_metadata, fp=f) + def finished(self) -> str: return self.base_directory diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 8b7760b2cc07..b06f25b03c21 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -252,16 +252,19 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> profile = await self.get_user(UserID.from_string(user_id)) if profile is not None: writer.write_profile(profile) + logger.info("[%s] Written profile", user_id) # Get all devices the user has devices = await self._device_handler.get_devices_by_user(user_id) writer.write_devices(devices) + logger.info("[%s] Written %s devices", user_id, len(devices)) # Get all connections the user has connections = await self.get_whois(UserID.from_string(user_id)) writer.write_connections( connections["devices"][""]["sessions"][0]["connections"] ) + logger.info("[%s] Written %s connections", user_id, len(connections)) # Get all account data the user has global and in rooms global_data = await self._store.get_global_account_data_for_user(user_id) @@ -269,6 +272,29 @@ async def export_user_data(self, user_id: str, writer: "ExfiltrationWriter") -> writer.write_account_data("global", global_data) for room_id in by_room_data: writer.write_account_data(room_id, by_room_data[room_id]) + logger.info( + "[%s] Written account data for %s rooms", user_id, len(by_room_data) + ) + + # Get all media ids the user has + limit = 100 + start = 0 + while True: + media_ids, total = await self._store.get_local_media_by_user_paginate( + start, limit, user_id + ) + for media in media_ids: + writer.write_media_id(media["media_id"], media) + + logger.info( + "[%s] Written %d media_ids of %s", + user_id, + (start + len(media_ids)), + total, + ) + if (start + 
limit) >= total: + break + start += limit return writer.finished() @@ -359,6 +385,18 @@ def write_account_data( """ raise NotImplementedError() + @abc.abstractmethod + def write_media_id(self, media_id: str, media_metadata: JsonDict) -> None: + """Write the media's metadata of a user. + Exports only the metadata, as this can be fetched from the database via + read only. In order to access the files, a connection to the correct + media repository would be required. + + Args: + media_id: ID of the media. + media_metadata: Metadata of one media file. + """ + @abc.abstractmethod def finished(self) -> Any: """Called when all data has successfully been exported and written. diff --git a/tests/handlers/test_admin.py b/tests/handlers/test_admin.py index 1b97aaeed134..5569ccef8aef 100644 --- a/tests/handlers/test_admin.py +++ b/tests/handlers/test_admin.py @@ -23,6 +23,7 @@ from synapse.api.room_versions import RoomVersions from synapse.rest.client import knock, login, room from synapse.server import HomeServer +from synapse.types import UserID from synapse.util import Clock from tests import unittest @@ -323,3 +324,31 @@ def test_account_data(self) -> None: args = writer.write_account_data.call_args_list[1][0] self.assertEqual(args[0], "test_room") self.assertEqual(args[1]["m.per_room"]["b"], 2) + + def test_media_ids(self) -> None: + """Tests that media's metadata get exported.""" + + self.get_success( + self._store.store_local_media( + media_id="media_1", + media_type="image/png", + time_now_ms=self.clock.time_msec(), + upload_name=None, + media_length=50, + user_id=UserID.from_string(self.user2), + ) + ) + + writer = Mock() + + self.get_success(self.admin_handler.export_user_data(self.user2, writer)) + + writer.write_media_id.assert_called_once() + + args = writer.write_media_id.call_args[0] + self.assertEqual(args[0], "media_1") + self.assertEqual(args[1]["media_id"], "media_1") + self.assertEqual(args[1]["media_length"], 50) + self.assertGreater(args[1]["created_ts"], 0) + self.assertIsNone(args[1]["upload_name"]) + self.assertIsNone(args[1]["last_access_ts"]) From 1a1738eca233ee26e214ef810e2833f9483edf04 Mon Sep 17 00:00:00 2001 From: Centzilius Date: Thu, 23 Feb 2023 19:25:56 +0100 Subject: [PATCH 230/344] Fix typo in federation_verify_certificates in config documentation. (#15139) --- changelog.d/15139.doc | 1 + docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15139.doc diff --git a/changelog.d/15139.doc b/changelog.d/15139.doc new file mode 100644 index 000000000000..d8ab48b272dd --- /dev/null +++ b/changelog.d/15139.doc @@ -0,0 +1 @@ +Correct reference to `federation_verify_certificates` in configuration documentation. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index ab1f9f4963c9..4139961810f4 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1105,7 +1105,7 @@ This setting should only be used in very specific cases, such as federation over Tor hidden services and similar. For private networks of homeservers, you likely want to use a private CA instead. -Only effective if `federation_verify_certicates` is `true`. +Only effective if `federation_verify_certificates` is `true`. 
Example configuration: ```yaml From ec79870f1422be47e8d6e85f315799888278969b Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 23 Feb 2023 16:06:42 -0500 Subject: [PATCH 231/344] Fix a typo in MSC3873 config option. (#15138) Previously the experimental configuration option referred to the wrong MSC number. --- changelog.d/15138.misc | 1 + synapse/config/experimental.py | 4 ++-- synapse/push/bulk_push_rule_evaluator.py | 12 ++++++------ tests/push/test_push_rule_evaluator.py | 2 +- 4 files changed, 10 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15138.misc diff --git a/changelog.d/15138.misc b/changelog.d/15138.misc new file mode 100644 index 000000000000..fb706b27f265 --- /dev/null +++ b/changelog.d/15138.misc @@ -0,0 +1 @@ +Fix a typo in an experimental config setting. diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 54c91953e106..bc38fae0b6d1 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -175,8 +175,8 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: ) # MSC3873: Disambiguate event_match keys. - self.msc3783_escape_event_match_key = experimental.get( - "msc3783_escape_event_match_key", False + self.msc3873_escape_event_match_key = experimental.get( + "msc3873_escape_event_match_key", False ) # MSC3952: Intentional mentions, this depends on MSC3758. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 8f834be7742a..3c4a152d6b72 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -276,7 +276,7 @@ async def _related_events( if related_event is not None: related_events[relation_type] = _flatten_dict( related_event, - msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key, + msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key, ) reply_event_id = ( @@ -294,7 +294,7 @@ async def _related_events( if related_event is not None: related_events["m.in_reply_to"] = _flatten_dict( related_event, - msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key, + msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key, ) # indicate that this is from a fallback relation. @@ -412,7 +412,7 @@ async def _action_for_event_by_user( evaluator = PushRuleEvaluator( _flatten_dict( event, - msc3783_escape_event_match_key=self.hs.config.experimental.msc3783_escape_event_match_key, + msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key, ), has_mentions, user_mentions, @@ -507,7 +507,7 @@ def _flatten_dict( prefix: Optional[List[str]] = None, result: Optional[Dict[str, JsonValue]] = None, *, - msc3783_escape_event_match_key: bool = False, + msc3873_escape_event_match_key: bool = False, ) -> Dict[str, JsonValue]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, @@ -536,7 +536,7 @@ def _flatten_dict( if result is None: result = {} for key, value in d.items(): - if msc3783_escape_event_match_key: + if msc3873_escape_event_match_key: # Escape periods in the key with a backslash (and backslashes with an # extra backslash). This is since a period is used as a separator between # nested fields. 
@@ -552,7 +552,7 @@ def _flatten_dict( value, prefix=(prefix + [key]), result=result, - msc3783_escape_event_match_key=msc3783_escape_event_match_key, + msc3873_escape_event_match_key=msc3873_escape_event_match_key, ) # `room_version` should only ever be set when looking at the top level of an event diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index d320a12f968d..4e858fd16f33 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -54,7 +54,7 @@ def test_nested(self) -> None: self.assertEqual({"m.foo.b\\ar": "abc"}, _flatten_dict(input)) self.assertEqual( {"m\\.foo.b\\\\ar": "abc"}, - _flatten_dict(input, msc3783_escape_event_match_key=True), + _flatten_dict(input, msc3873_escape_event_match_key=True), ) def test_non_string(self) -> None: From f8a584ed0259cbb3c3a51726d1008d04c26b4bd8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 23 Feb 2023 16:07:46 -0500 Subject: [PATCH 232/344] Stop parsing the unspecced type parameter on thumbnail requests. (#15137) Ideally we would replace this with parsing of the Accept header or something else, but for now just make Synapse spec compliant by ignoring the unspecced parameter. It does not seem that this is ever sent by a client, and even if it is there's a reasonable fallback. --- changelog.d/15137.removal | 1 + synapse/rest/media/v1/thumbnail_resource.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15137.removal diff --git a/changelog.d/15137.removal b/changelog.d/15137.removal new file mode 100644 index 000000000000..c533b0c9dd19 --- /dev/null +++ b/changelog.d/15137.removal @@ -0,0 +1 @@ +Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/v1/thumbnail_resource.py index 5f725c76009f..3e720018b31c 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/v1/thumbnail_resource.py @@ -69,7 +69,8 @@ async def _async_render_GET(self, request: SynapseRequest) -> None: width = parse_integer(request, "width", required=True) height = parse_integer(request, "height", required=True) method = parse_string(request, "method", "scale") - m_type = parse_string(request, "type", "image/png") + # TODO Parse the Accept header to get an prioritised list of thumbnail types. + m_type = "image/png" if server_name == self.server_name: if self.dynamic_thumbnails: From 682151a464f688768d5bd8308e16bd4024ad2e57 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 23 Feb 2023 16:08:53 -0500 Subject: [PATCH 233/344] Do not fail completely if oEmbed autodiscovery fails. (#15092) Previously if an autodiscovered oEmbed request failed (e.g. the oEmbed endpoint is down or does not exist) then the entire URL preview would fail. Instead we now return everything we can, even if this additional request fails. --- changelog.d/15092.bugfix | 1 + synapse/rest/media/v1/preview_url_resource.py | 33 +++++++++----- tests/rest/media/v1/test_url_preview.py | 44 +++++++++++++++++-- 3 files changed, 65 insertions(+), 13 deletions(-) create mode 100644 changelog.d/15092.bugfix diff --git a/changelog.d/15092.bugfix b/changelog.d/15092.bugfix new file mode 100644 index 000000000000..67509c5c692a --- /dev/null +++ b/changelog.d/15092.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. 
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index a8f6fd6b3578..4a594ab9d83c 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -163,6 +163,10 @@ class PreviewUrlResource(DirectServeJsonResource): 7. Stores the result in the database cache. 4. Returns the result. + If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or + image thumbnailing, step 5.4 or 6.4) fails then the URL preview as a whole + does not fail. As much information as possible is returned. + The in-memory cache expires after 1 hour. Expired entries in the database cache (and their associated media files) are @@ -364,16 +368,25 @@ async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: oembed_url = self._oembed.autodiscover_from_html(tree) og_from_oembed: JsonDict = {} if oembed_url: - oembed_info = await self._handle_url( - oembed_url, user, allow_data_urls=True - ) - ( - og_from_oembed, - author_name, - expiration_ms, - ) = await self._handle_oembed_response( - url, oembed_info, expiration_ms - ) + try: + oembed_info = await self._handle_url( + oembed_url, user, allow_data_urls=True + ) + except Exception as e: + # Fetching the oEmbed info failed, don't block the entire URL preview. + logger.warning( + "oEmbed fetch failed during URL preview: %s errored with %s", + oembed_url, + e, + ) + else: + ( + og_from_oembed, + author_name, + expiration_ms, + ) = await self._handle_oembed_response( + url, oembed_info, expiration_ms + ) # Parse Open Graph information from the HTML in case the oEmbed # response failed or is incomplete. diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index 6fcf60ce1904..2acfccec6170 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -657,7 +657,7 @@ def test_nonexistent_image(self) -> None: """If the preview image doesn't exist, ensure some data is returned.""" self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] - end_content = ( + result = ( b"""""" ) @@ -678,8 +678,8 @@ def test_nonexistent_image(self) -> None: b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" b'Content-Type: text/html; charset="utf8"\r\n\r\n' ) - % (len(end_content),) - + end_content + % (len(result),) + + result ) self.pump() @@ -688,6 +688,44 @@ def test_nonexistent_image(self) -> None: # The image should not be in the result. self.assertNotIn("og:image", channel.json_body) + def test_oembed_failure(self) -> None: + """If the autodiscovered oEmbed URL fails, ensure some data is returned.""" + self.lookups["matrix.org"] = [(IPv4Address, "10.1.2.3")] + + result = b""" + oEmbed Autodiscovery Fail + + """ + + channel = self.make_request( + "GET", + "preview_url?url=http://matrix.org", + shorthand=False, + await_result=False, + ) + self.pump() + + client = self.reactor.tcpClients[0][2].buildProtocol(None) + server = AccumulatingProtocol() + server.makeConnection(FakeTransport(client, self.reactor)) + client.makeConnection(FakeTransport(server, self.reactor)) + client.dataReceived( + ( + b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n" + b'Content-Type: text/html; charset="utf8"\r\n\r\n' + ) + % (len(result),) + + result + ) + + self.pump() + self.assertEqual(channel.code, 200) + + # The image should not be in the result. + self.assertEqual(channel.json_body["og:title"], "oEmbed Autodiscovery Fail") + def test_data_url(self) -> None: """ Requesting to preview a data URL is not supported. 
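To make the behaviour change in this patch easier to follow outside the diff, the pattern boils down to treating the autodiscovered oEmbed request as optional: attempt it, log a warning if it fails, and fall back to whatever Open Graph data was already parsed from the HTML. The sketch below illustrates only that shape; the callables it takes are simplified stand-ins for Synapse's real `_handle_url` / `_handle_oembed_response` machinery, not actual APIs.

```python
import logging
from typing import Awaitable, Callable, Dict, Optional

logger = logging.getLogger(__name__)


async def preview_with_optional_oembed(
    url: str,
    fetch_html: Callable[[str], Awaitable[str]],
    parse_open_graph: Callable[[str], Dict[str, str]],
    autodiscover_oembed_url: Callable[[str], Optional[str]],
    fetch_oembed: Callable[[str], Awaitable[Dict[str, str]]],
) -> Dict[str, str]:
    html = await fetch_html(url)
    og = parse_open_graph(html)

    oembed_url = autodiscover_oembed_url(html)
    if oembed_url:
        try:
            oembed_info = await fetch_oembed(oembed_url)
        except Exception as e:
            # A missing or broken oEmbed endpoint must not fail the preview as a
            # whole: log it and keep the HTML-derived Open Graph data instead.
            logger.warning(
                "oEmbed fetch failed during URL preview: %s errored with %s",
                oembed_url,
                e,
            )
        else:
            # Merge the oEmbed-derived fields only when the extra request succeeded.
            og.update(oembed_info)

    return og
```

The key detail, mirrored in the real change above, is the `try`/`except`/`else` split: only the network fetch sits inside the `try`, so a failure there degrades the preview rather than aborting it.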
From 335f52d595c2c32e4b512b97e2851bc98b819ca7 Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Fri, 24 Feb 2023 13:39:45 +0000 Subject: [PATCH 234/344] Improve handling of non-ASCII characters in user directory search (#15143) * Fix a long-standing bug where non-ASCII characters in search terms, including accented letters, would not match characters in a different case. * Fix a long-standing bug where search terms using combining accents would not match display names using precomposed accents and vice versa. To fully take effect, the user directory must be rebuilt after this change. Fixes #14630. Signed-off-by: Sean Quah --- changelog.d/15143.misc | 1 + .../storage/databases/main/user_directory.py | 52 ++++++- tests/storage/test_user_directory.py | 133 ++++++++++++++++++ 3 files changed, 184 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15143.misc diff --git a/changelog.d/15143.misc b/changelog.d/15143.misc new file mode 100644 index 000000000000..cff4518811ec --- /dev/null +++ b/changelog.d/15143.misc @@ -0,0 +1 @@ +Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index c3f2b61bd543..f16a509ac496 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -14,6 +14,7 @@ import logging import re +import unicodedata from typing import ( TYPE_CHECKING, Iterable, @@ -490,6 +491,11 @@ def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: values={"display_name": display_name, "avatar_url": avatar_url}, ) + # The display name that goes into the database index. + index_display_name = display_name + if index_display_name is not None: + index_display_name = _filter_text_for_index(index_display_name) + if isinstance(self.database_engine, PostgresEngine): # We weight the localpart most highly, then display name and finally # server name @@ -507,11 +513,15 @@ def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: user_id, get_localpart_from_id(user_id), get_domain_from_id(user_id), - display_name, + index_display_name, ), ) elif isinstance(self.database_engine, Sqlite3Engine): - value = "%s %s" % (user_id, display_name) if display_name else user_id + value = ( + "%s %s" % (user_id, index_display_name) + if index_display_name + else user_id + ) self.db_pool.simple_upsert_txn( txn, table="user_directory_search", @@ -896,6 +906,41 @@ async def search_user_dir( return {"limited": limited, "results": results[0:limit]} +def _filter_text_for_index(text: str) -> str: + """Transforms text before it is inserted into the user directory index, or searched + for in the user directory index. + + Note that the user directory search table needs to be rebuilt whenever this function + changes. + """ + # Lowercase the text, to make searches case-insensitive. + # This is necessary for both PostgreSQL and SQLite. PostgreSQL's + # `to_tsquery/to_tsvector` functions don't lowercase non-ASCII characters when using + # the "C" collation, while SQLite just doesn't lowercase non-ASCII characters at + # all. + text = text.lower() + + # Normalize the text. NFKC normalization has two effects: + # 1. It canonicalizes the text, ie. maps all visually identical strings to the same + # string. For example, ["e", "◌́"] is mapped to ["é"]. + # 2. It maps strings that are roughly equivalent to the same string. 
+ # For example, ["dž"] is mapped to ["d", "ž"], ["①"] to ["1"] and ["i⁹"] to + # ["i", "9"]. + text = unicodedata.normalize("NFKC", text) + + # Note that nothing is done to make searches accent-insensitive. + # That could be achieved by converting to NFKD form instead (with combining accents + # split out) and filtering out combining accents using `unicodedata.combining(c)`. + # The downside of this may be noisier search results, since search terms with + # explicit accents will match characters with no accents, or completely different + # accents. + # + # text = unicodedata.normalize("NFKD", text) + # text = "".join([c for c in text if not unicodedata.combining(c)]) + + return text + + def _parse_query_sqlite(search_term: str) -> str: """Takes a plain unicode string from the user and converts it into a form that can be passed to database. @@ -905,6 +950,7 @@ def _parse_query_sqlite(search_term: str) -> str: We specifically add both a prefix and non prefix matching term so that exact matches get ranked higher. """ + search_term = _filter_text_for_index(search_term) # Pull out the individual words, discarding any non-word characters. results = _parse_words(search_term) @@ -917,6 +963,8 @@ def _parse_query_postgres(search_term: str) -> Tuple[str, str, str]: We use this so that we can add prefix matching, which isn't something that is supported by default. """ + search_term = _filter_text_for_index(search_term) + escaped_words = [] for word in _parse_words(search_term): # Postgres tsvector and tsquery quoting rules: diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 2d169684cf2e..43b724c4ddd7 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -504,6 +504,139 @@ def test_search_user_dir_start_of_user_id(self) -> None: {"user_id": BELA, "display_name": "Bela", "avatar_url": None}, ) + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_ascii_case_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name in a + different case. + """ + CHARLIE = "@someuser:example.org" + self.get_success( + self.store.update_profile_in_user_dir(CHARLIE, "Charlie", None) + ) + + r = self.get_success(self.store.search_user_dir(ALICE, "cHARLIE", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": CHARLIE, "display_name": "Charlie", "avatar_url": None}, + ) + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_unicode_case_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name in a + different case. + """ + IVAN = "@someuser:example.org" + self.get_success(self.store.update_profile_in_user_dir(IVAN, "Иван", None)) + + r = self.get_success(self.store.search_user_dir(ALICE, "иВАН", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": IVAN, "display_name": "Иван", "avatar_url": None}, + ) + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_dotted_dotless_i_case_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name in a + different case, when their name contains dotted or dotless "i"s. 
+ + Some languages have dotted and dotless versions of "i", which are considered to + be different letters: i <-> İ, ı <-> I. To make things difficult, they reuse the + ASCII "i" and "I" code points, despite having different lowercase / uppercase + forms. + """ + USER = "@someuser:example.org" + + expected_matches = [ + # (search_term, display_name) + # A search for "i" should match "İ". + ("iiiii", "İİİİİ"), + # A search for "I" should match "ı". + ("IIIII", "ııııı"), + # A search for "ı" should match "I". + ("ııııı", "IIIII"), + # A search for "İ" should match "i". + ("İİİİİ", "iiiii"), + ] + + for search_term, display_name in expected_matches: + self.get_success( + self.store.update_profile_in_user_dir(USER, display_name, None) + ) + + r = self.get_success(self.store.search_user_dir(ALICE, search_term, 10)) + self.assertFalse(r["limited"]) + self.assertEqual( + 1, + len(r["results"]), + f"searching for {search_term!r} did not match {display_name!r}", + ) + self.assertDictEqual( + r["results"][0], + {"user_id": USER, "display_name": display_name, "avatar_url": None}, + ) + + # We don't test for negative matches, to allow implementations that consider all + # the i variants to be the same. + + test_search_user_dir_dotted_dotless_i_case_insensitivity.skip = "not supported" # type: ignore + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_unicode_normalization(self) -> None: + """Tests that a user can look up another user by searching for their name with + either composed or decomposed accents. + """ + AMELIE = "@someuser:example.org" + + expected_matches = [ + # (search_term, display_name) + ("Ame\u0301lie", "Amélie"), + ("Amélie", "Ame\u0301lie"), + ] + + for search_term, display_name in expected_matches: + self.get_success( + self.store.update_profile_in_user_dir(AMELIE, display_name, None) + ) + + r = self.get_success(self.store.search_user_dir(ALICE, search_term, 10)) + self.assertFalse(r["limited"]) + self.assertEqual( + 1, + len(r["results"]), + f"searching for {search_term!r} did not match {display_name!r}", + ) + self.assertDictEqual( + r["results"][0], + {"user_id": AMELIE, "display_name": display_name, "avatar_url": None}, + ) + + @override_config({"user_directory": {"search_all_users": True}}) + def test_search_user_dir_accent_insensitivity(self) -> None: + """Tests that a user can look up another user by searching for their name + without any accents. + """ + AMELIE = "@someuser:example.org" + self.get_success(self.store.update_profile_in_user_dir(AMELIE, "Amélie", None)) + + r = self.get_success(self.store.search_user_dir(ALICE, "amelie", 10)) + self.assertFalse(r["limited"]) + self.assertEqual(1, len(r["results"])) + self.assertDictEqual( + r["results"][0], + {"user_id": AMELIE, "display_name": "Amélie", "avatar_url": None}, + ) + + # It may be desirable for "é"s in search terms to not match plain "e"s and we + # really don't want "é"s in search terms to match "e"s with different accents. + # But we don't test for this to allow implementations that consider all + # "e"-lookalikes to be the same. + + test_search_user_dir_accent_insensitivity.skip = "not supported yet" # type: ignore + class UserDirectoryStoreTestCaseWithIcu(UserDirectoryStoreTestCase): use_icu = True From b2357a898cdd1f4a2222609abfe471801ea88dcd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 24 Feb 2023 14:39:50 +0000 Subject: [PATCH 235/344] Fix bug where 5s delays would occasionally happen. (#15150) This only affects deployments using workers. 
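As the new comment in `_run_notifier_loop` below spells out, the last batch of `RDATA` rows sent out can stop short of the stream's current token; a worker waiting on that newer token can then stall until something else advances the stream, which is the source of the occasional 5s delays this patch fixes. The fix is to follow the batch with an explicit `POSITION` advertising the real current token. A minimal sketch of that check, using a simplified stand-in for Synapse's `PositionCommand` rather than the real class:

```python
import logging
from dataclasses import dataclass
from typing import Callable, Sequence, Tuple

logger = logging.getLogger(__name__)


@dataclass
class Position:
    """Simplified stand-in for a POSITION command on the replication stream."""

    stream_name: str
    instance_name: str
    prev_token: int
    new_token: int


def advertise_position_if_behind(
    stream_name: str,
    instance_name: str,
    updates: Sequence[Tuple[int, object]],
    current_token: int,
    send_command: Callable[[Position], None],
) -> None:
    """After streaming a batch of updates, tell followers where the stream
    really is if the newest row sent did not reach the current token."""
    if not updates:
        return
    last_sent_token = updates[-1][0]
    if last_sent_token < current_token:
        logger.info("Sending position: %s -> %s", stream_name, current_token)
        send_command(
            Position(stream_name, instance_name, last_sent_token, current_token)
        )
```

The accompanying test (`test_wait_for_stream_position_rdata`) exercises exactly this gap: it persists a row against the first of two reserved tokens and checks that a waiter for the second token is still released.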
--- changelog.d/15150.bugfix | 1 + synapse/replication/tcp/resource.py | 18 ++++++++ tests/replication/tcp/test_handler.py | 61 +++++++++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 changelog.d/15150.bugfix diff --git a/changelog.d/15150.bugfix b/changelog.d/15150.bugfix new file mode 100644 index 000000000000..8668bc587f8b --- /dev/null +++ b/changelog.d/15150.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 9d17eff71451..347467d863ec 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -238,6 +238,24 @@ async def _run_notifier_loop(self) -> None: except Exception: logger.exception("Failed to replicate") + # The last token we send may not match the current + # token, in which case we want to send out a `POSITION` + # to tell other workers the actual current position. + if updates[-1][0] < current_token: + logger.info( + "Sending position: %s -> %s", + stream.NAME, + current_token, + ) + self.command_handler.send_command( + PositionCommand( + stream.NAME, + self._instance_name, + updates[-1][0], + current_token, + ) + ) + logger.debug("No more pending updates, breaking poke loop") finally: self.pending_updates = False diff --git a/tests/replication/tcp/test_handler.py b/tests/replication/tcp/test_handler.py index bf927beb6a87..bab77b2df787 100644 --- a/tests/replication/tcp/test_handler.py +++ b/tests/replication/tcp/test_handler.py @@ -141,3 +141,64 @@ def test_wait_for_stream_position(self) -> None: self.get_success(ctx_worker1.__aexit__(None, None, None)) self.assertTrue(d.called) + + def test_wait_for_stream_position_rdata(self) -> None: + """Check that wait for stream position correctly waits for an update + from the correct instance, when RDATA is sent. + """ + store = self.hs.get_datastores().main + cmd_handler = self.hs.get_replication_command_handler() + data_handler = self.hs.get_replication_data_handler() + + worker1 = self.make_worker_hs( + "synapse.app.generic_worker", + extra_config={ + "worker_name": "worker1", + "run_background_tasks_on": "worker1", + "redis": {"enabled": True}, + }, + ) + + cache_id_gen = worker1.get_datastores().main._cache_id_gen + assert cache_id_gen is not None + + self.replicate() + + # First, make sure the master knows that `worker1` exists. + initial_token = cache_id_gen.get_current_token() + cmd_handler.send_command( + PositionCommand("caches", "worker1", initial_token, initial_token) + ) + self.replicate() + + # `wait_for_stream_position` should only return once master receives a + # notification that `next_token2` has persisted. + ctx_worker1 = cache_id_gen.get_next_mult(2) + next_token1, next_token2 = self.get_success(ctx_worker1.__aenter__()) + + d = defer.ensureDeferred( + data_handler.wait_for_stream_position("worker1", "caches", next_token2) + ) + self.assertFalse(d.called) + + # Insert an entry into the cache stream with token `next_token1`, but + # not `next_token2`. + self.get_success( + store.db_pool.simple_insert( + table="cache_invalidation_stream_by_instance", + values={ + "stream_id": next_token1, + "instance_name": "worker1", + "cache_func": "foo", + "keys": [], + "invalidation_ts": 0, + }, + ) + ) + + # Finish the context manager, triggering the data to be sent to master. 
+ self.get_success(ctx_worker1.__aexit__(None, None, None)) + + # Master should get told about `next_token2`, so the deferred should + # resolve. + self.assertTrue(d.called) From 1c95ddd09bbc46046a3412e7bb03a87aa3b6f65a Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 24 Feb 2023 13:15:29 -0800 Subject: [PATCH 236/344] Batch up storing state groups when creating new room (#14918) --- changelog.d/14918.misc | 1 + synapse/events/snapshot.py | 49 ++++++++ synapse/handlers/message.py | 16 ++- synapse/handlers/room.py | 37 +++--- synapse/handlers/room_batch.py | 4 +- synapse/handlers/room_member.py | 13 +- synapse/storage/databases/state/store.py | 119 ++++++++++++++++++ tests/handlers/test_message.py | 25 ++-- tests/handlers/test_register.py | 3 +- tests/push/test_bulk_push_rule_evaluator.py | 13 +- tests/rest/client/test_rooms.py | 4 +- tests/storage/test_event_chain.py | 6 +- tests/storage/test_state.py | 126 ++++++++++++++++++++ tests/unittest.py | 4 +- 14 files changed, 371 insertions(+), 49 deletions(-) create mode 100644 changelog.d/14918.misc diff --git a/changelog.d/14918.misc b/changelog.d/14918.misc new file mode 100644 index 000000000000..828794354acd --- /dev/null +++ b/changelog.d/14918.misc @@ -0,0 +1 @@ +Batch up storing state groups when creating a new room. \ No newline at end of file diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index e0d82ad81cf9..a91a5d1e3cbe 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -23,6 +23,7 @@ if TYPE_CHECKING: from synapse.storage.controllers import StorageControllers + from synapse.storage.databases import StateGroupDataStore from synapse.storage.databases.main import DataStore from synapse.types.state import StateFilter @@ -348,6 +349,54 @@ class UnpersistedEventContext(UnpersistedEventContextBase): partial_state: bool state_map_before_event: Optional[StateMap[str]] = None + @classmethod + async def batch_persist_unpersisted_contexts( + cls, + events_and_context: List[Tuple[EventBase, "UnpersistedEventContextBase"]], + room_id: str, + last_known_state_group: int, + datastore: "StateGroupDataStore", + ) -> List[Tuple[EventBase, EventContext]]: + """ + Takes a list of events and their associated unpersisted contexts and persists + the unpersisted contexts, returning a list of events and persisted contexts. + Note that all the events must be in a linear chain (ie a <- b <- c). 
+ + Args: + events_and_context: A list of events and their unpersisted contexts + room_id: the room_id for the events + last_known_state_group: the last persisted state group + datastore: a state datastore + """ + amended_events_and_context = await datastore.store_state_deltas_for_batched( + events_and_context, room_id, last_known_state_group + ) + + events_and_persisted_context = [] + for event, unpersisted_context in amended_events_and_context: + if event.is_state(): + context = EventContext( + storage=unpersisted_context._storage, + state_group=unpersisted_context.state_group_after_event, + state_group_before_event=unpersisted_context.state_group_before_event, + state_delta_due_to_event=unpersisted_context.state_delta_due_to_event, + partial_state=unpersisted_context.partial_state, + prev_group=unpersisted_context.state_group_before_event, + delta_ids=unpersisted_context.state_delta_due_to_event, + ) + else: + context = EventContext( + storage=unpersisted_context._storage, + state_group=unpersisted_context.state_group_after_event, + state_group_before_event=unpersisted_context.state_group_before_event, + state_delta_due_to_event=unpersisted_context.state_delta_due_to_event, + partial_state=unpersisted_context.partial_state, + prev_group=unpersisted_context.prev_group_for_state_group_before_event, + delta_ids=unpersisted_context.delta_ids_to_state_group_before_event, + ) + events_and_persisted_context.append((event, context)) + return events_and_persisted_context + async def get_prev_state_ids( self, state_filter: Optional["StateFilter"] = None ) -> StateMap[str]: diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index aa90d0000d40..e433d6b01f02 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -574,7 +574,7 @@ async def create_event( state_map: Optional[StateMap[str]] = None, for_batch: bool = False, current_state_group: Optional[int] = None, - ) -> Tuple[EventBase, EventContext]: + ) -> Tuple[EventBase, UnpersistedEventContextBase]: """ Given a dict from a client, create a new event. If bool for_batch is true, will create an event using the prev_event_ids, and will create an event context for @@ -721,8 +721,6 @@ async def create_event( current_state_group=current_state_group, ) - context = await unpersisted_context.persist(event) - # In an ideal world we wouldn't need the second part of this condition. However, # this behaviour isn't spec'd yet, meaning we should be able to deactivate this # behaviour. 
Another reason is that this code is also evaluated each time a new @@ -739,7 +737,7 @@ async def create_event( assert state_map is not None prev_event_id = state_map.get((EventTypes.Member, event.sender)) else: - prev_state_ids = await context.get_prev_state_ids( + prev_state_ids = await unpersisted_context.get_prev_state_ids( StateFilter.from_types([(EventTypes.Member, None)]) ) prev_event_id = prev_state_ids.get((EventTypes.Member, event.sender)) @@ -764,8 +762,7 @@ async def create_event( ) self.validator.validate_new(event, self.config) - - return event, context + return event, unpersisted_context async def _is_exempt_from_privacy_policy( self, builder: EventBuilder, requester: Requester @@ -1005,7 +1002,7 @@ async def create_and_send_nonmember_event( max_retries = 5 for i in range(max_retries): try: - event, context = await self.create_event( + event, unpersisted_context = await self.create_event( requester, event_dict, txn_id=txn_id, @@ -1016,6 +1013,7 @@ async def create_and_send_nonmember_event( historical=historical, depth=depth, ) + context = await unpersisted_context.persist(event) assert self.hs.is_mine_id(event.sender), "User must be our own: %s" % ( event.sender, @@ -1190,7 +1188,6 @@ async def create_new_client_event( if for_batch: assert prev_event_ids is not None assert state_map is not None - assert current_state_group is not None auth_ids = self._event_auth_handler.compute_auth_events(builder, state_map) event = await builder.build( prev_event_ids=prev_event_ids, auth_event_ids=auth_ids, depth=depth @@ -2046,7 +2043,7 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool: max_retries = 5 for i in range(max_retries): try: - event, context = await self.create_event( + event, unpersisted_context = await self.create_event( requester, { "type": EventTypes.Dummy, @@ -2055,6 +2052,7 @@ async def _send_dummy_event_for_room(self, room_id: str) -> bool: "sender": user_id, }, ) + context = await unpersisted_context.persist(event) event.internal_metadata.proactively_send = False diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index a26ec02284bf..b1784638f4ba 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -51,6 +51,7 @@ from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersion from synapse.event_auth import validate_event_for_room_version from synapse.events import EventBase +from synapse.events.snapshot import UnpersistedEventContext from synapse.events.utils import copy_and_fixup_power_levels_contents from synapse.handlers.relations import BundledAggregations from synapse.module_api import NOT_SPAM @@ -211,7 +212,7 @@ async def upgrade_room( # the required power level to send the tombstone event. ( tombstone_event, - tombstone_context, + tombstone_unpersisted_context, ) = await self.event_creation_handler.create_event( requester, { @@ -225,6 +226,9 @@ async def upgrade_room( }, }, ) + tombstone_context = await tombstone_unpersisted_context.persist( + tombstone_event + ) validate_event_for_room_version(tombstone_event) await self._event_auth_handler.check_auth_rules_from_context( tombstone_event @@ -1092,7 +1096,7 @@ async def create_event( content: JsonDict, for_batch: bool, **kwargs: Any, - ) -> Tuple[EventBase, synapse.events.snapshot.EventContext]: + ) -> Tuple[EventBase, synapse.events.snapshot.UnpersistedEventContextBase]: """ Creates an event and associated event context. 
Args: @@ -1111,20 +1115,23 @@ async def create_event( event_dict = create_event_dict(etype, content, **kwargs) - new_event, new_context = await self.event_creation_handler.create_event( + ( + new_event, + new_unpersisted_context, + ) = await self.event_creation_handler.create_event( creator, event_dict, prev_event_ids=prev_event, depth=depth, state_map=state_map, for_batch=for_batch, - current_state_group=current_state_group, ) + depth += 1 prev_event = [new_event.event_id] state_map[(new_event.type, new_event.state_key)] = new_event.event_id - return new_event, new_context + return new_event, new_unpersisted_context try: config = self._presets_dict[preset_config] @@ -1134,10 +1141,10 @@ async def create_event( ) creation_content.update({"creator": creator_id}) - creation_event, creation_context = await create_event( + creation_event, unpersisted_creation_context = await create_event( EventTypes.Create, creation_content, False ) - + creation_context = await unpersisted_creation_context.persist(creation_event) logger.debug("Sending %s in new room", EventTypes.Member) ev = await self.event_creation_handler.handle_new_client_event( requester=creator, @@ -1181,7 +1188,6 @@ async def create_event( power_event, power_context = await create_event( EventTypes.PowerLevels, pl_content, True ) - current_state_group = power_context._state_group events_to_send.append((power_event, power_context)) else: power_level_content: JsonDict = { @@ -1230,14 +1236,12 @@ async def create_event( power_level_content, True, ) - current_state_group = pl_context._state_group events_to_send.append((pl_event, pl_context)) if room_alias and (EventTypes.CanonicalAlias, "") not in initial_state: room_alias_event, room_alias_context = await create_event( EventTypes.CanonicalAlias, {"alias": room_alias.to_string()}, True ) - current_state_group = room_alias_context._state_group events_to_send.append((room_alias_event, room_alias_context)) if (EventTypes.JoinRules, "") not in initial_state: @@ -1246,7 +1250,6 @@ async def create_event( {"join_rule": config["join_rules"]}, True, ) - current_state_group = join_rules_context._state_group events_to_send.append((join_rules_event, join_rules_context)) if (EventTypes.RoomHistoryVisibility, "") not in initial_state: @@ -1255,7 +1258,6 @@ async def create_event( {"history_visibility": config["history_visibility"]}, True, ) - current_state_group = visibility_context._state_group events_to_send.append((visibility_event, visibility_context)) if config["guest_can_join"]: @@ -1265,14 +1267,12 @@ async def create_event( {EventContentFields.GUEST_ACCESS: GuestAccess.CAN_JOIN}, True, ) - current_state_group = guest_access_context._state_group events_to_send.append((guest_access_event, guest_access_context)) for (etype, state_key), content in initial_state.items(): event, context = await create_event( etype, content, True, state_key=state_key ) - current_state_group = context._state_group events_to_send.append((event, context)) if config["encrypted"]: @@ -1284,9 +1284,16 @@ async def create_event( ) events_to_send.append((encryption_event, encryption_context)) + datastore = self.hs.get_datastores().state + events_and_context = ( + await UnpersistedEventContext.batch_persist_unpersisted_contexts( + events_to_send, room_id, current_state_group, datastore + ) + ) + last_event = await self.event_creation_handler.handle_new_client_event( creator, - events_to_send, + events_and_context, ignore_shadow_ban=True, ratelimit=False, ) diff --git a/synapse/handlers/room_batch.py 
b/synapse/handlers/room_batch.py index 5d4ca0e2d21a..bf9df60218a1 100644 --- a/synapse/handlers/room_batch.py +++ b/synapse/handlers/room_batch.py @@ -327,7 +327,7 @@ async def persist_historical_events( # Mark all events as historical event_dict["content"][EventContentFields.MSC2716_HISTORICAL] = True - event, context = await self.event_creation_handler.create_event( + event, unpersisted_context = await self.event_creation_handler.create_event( await self.create_requester_for_user_id_from_app_service( ev["sender"], app_service_requester.app_service ), @@ -345,7 +345,7 @@ async def persist_historical_events( historical=True, depth=inherited_depth, ) - + context = await unpersisted_context.persist(event) assert context._state_group # Normally this is done when persisting the event but we have to diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index a965c7ec7698..de7476f3005d 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -414,7 +414,10 @@ async def _local_membership_update( max_retries = 5 for i in range(max_retries): try: - event, context = await self.event_creation_handler.create_event( + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_event( requester, { "type": EventTypes.Member, @@ -435,7 +438,7 @@ async def _local_membership_update( outlier=outlier, historical=historical, ) - + context = await unpersisted_context.persist(event) prev_state_ids = await context.get_prev_state_ids( StateFilter.from_types([(EventTypes.Member, None)]) ) @@ -1944,7 +1947,10 @@ async def _generate_local_out_of_band_leave( max_retries = 5 for i in range(max_retries): try: - event, context = await self.event_creation_handler.create_event( + ( + event, + unpersisted_context, + ) = await self.event_creation_handler.create_event( requester, event_dict, txn_id=txn_id, @@ -1952,6 +1958,7 @@ async def _generate_local_out_of_band_leave( auth_event_ids=auth_event_ids, outlier=True, ) + context = await unpersisted_context.persist(event) event.internal_metadata.out_of_band_membership = True result_event = ( diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 89b1faa6c834..bf4cdfdf2917 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -18,6 +18,8 @@ import attr from synapse.api.constants import EventTypes +from synapse.events import EventBase +from synapse.events.snapshot import UnpersistedEventContext, UnpersistedEventContextBase from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, @@ -401,6 +403,123 @@ def _insert_into_cache( fetched_keys=non_member_types, ) + async def store_state_deltas_for_batched( + self, + events_and_context: List[Tuple[EventBase, UnpersistedEventContextBase]], + room_id: str, + prev_group: int, + ) -> List[Tuple[EventBase, UnpersistedEventContext]]: + """Generate and store state deltas for a group of events and contexts created to be + batch persisted. Note that all the events must be in a linear chain (ie a <- b <- c). 
+ + Args: + events_and_context: the events to generate and store a state groups for + and their associated contexts + room_id: the id of the room the events were created for + prev_group: the state group of the last event persisted before the batched events + were created + """ + + def insert_deltas_group_txn( + txn: LoggingTransaction, + events_and_context: List[Tuple[EventBase, UnpersistedEventContext]], + prev_group: int, + ) -> List[Tuple[EventBase, UnpersistedEventContext]]: + """Generate and store state groups for the provided events and contexts. + + Requires that we have the state as a delta from the last persisted state group. + + Returns: + A list of state groups + """ + is_in_db = self.db_pool.simple_select_one_onecol_txn( + txn, + table="state_groups", + keyvalues={"id": prev_group}, + retcol="id", + allow_none=True, + ) + if not is_in_db: + raise Exception( + "Trying to persist state with unpersisted prev_group: %r" + % (prev_group,) + ) + + num_state_groups = sum( + 1 for event, _ in events_and_context if event.is_state() + ) + + state_groups = self._state_group_seq_gen.get_next_mult_txn( + txn, num_state_groups + ) + + sg_before = prev_group + state_group_iter = iter(state_groups) + for event, context in events_and_context: + if not event.is_state(): + context.state_group_after_event = sg_before + context.state_group_before_event = sg_before + continue + + sg_after = next(state_group_iter) + context.state_group_after_event = sg_after + context.state_group_before_event = sg_before + context.state_delta_due_to_event = { + (event.type, event.state_key): event.event_id + } + sg_before = sg_after + + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups", + keys=("id", "room_id", "event_id"), + values=[ + (context.state_group_after_event, room_id, event.event_id) + for event, context in events_and_context + if event.is_state() + ], + ) + + self.db_pool.simple_insert_many_txn( + txn, + table="state_group_edges", + keys=("state_group", "prev_state_group"), + values=[ + ( + context.state_group_after_event, + context.state_group_before_event, + ) + for event, context in events_and_context + if event.is_state() + ], + ) + + self.db_pool.simple_insert_many_txn( + txn, + table="state_groups_state", + keys=("state_group", "room_id", "type", "state_key", "event_id"), + values=[ + ( + context.state_group_after_event, + room_id, + key[0], + key[1], + state_id, + ) + for event, context in events_and_context + if context.state_delta_due_to_event is not None + for key, state_id in context.state_delta_due_to_event.items() + ], + ) + return events_and_context + + return await self.db_pool.runInteraction( + "store_state_deltas_for_batched.insert_deltas_group", + insert_deltas_group_txn, + events_and_context, + prev_group, + ) + async def store_state_group( self, event_id: str, diff --git a/tests/handlers/test_message.py b/tests/handlers/test_message.py index 69d384442f43..9691d66b48a0 100644 --- a/tests/handlers/test_message.py +++ b/tests/handlers/test_message.py @@ -18,7 +18,7 @@ from synapse.api.constants import EventTypes from synapse.events import EventBase -from synapse.events.snapshot import EventContext +from synapse.events.snapshot import EventContext, UnpersistedEventContextBase from synapse.rest import admin from synapse.rest.client import login, room from synapse.server import HomeServer @@ -79,7 +79,9 @@ def _create_and_persist_member_event(self) -> Tuple[EventBase, EventContext]: return memberEvent, memberEventContext - def _create_duplicate_event(self, txn_id: str) -> 
Tuple[EventBase, EventContext]: + def _create_duplicate_event( + self, txn_id: str + ) -> Tuple[EventBase, UnpersistedEventContextBase]: """Create a new event with the given transaction ID. All events produced by this method will be considered duplicates. """ @@ -107,7 +109,8 @@ def test_duplicated_txn_id(self) -> None: txn_id = "something_suitably_random" - event1, context = self._create_duplicate_event(txn_id) + event1, unpersisted_context = self._create_duplicate_event(txn_id) + context = self.get_success(unpersisted_context.persist(event1)) ret_event1 = self.get_success( self.handler.handle_new_client_event( @@ -119,7 +122,8 @@ def test_duplicated_txn_id(self) -> None: self.assertEqual(event1.event_id, ret_event1.event_id) - event2, context = self._create_duplicate_event(txn_id) + event2, unpersisted_context = self._create_duplicate_event(txn_id) + context = self.get_success(unpersisted_context.persist(event2)) # We want to test that the deduplication at the persit event end works, # so we want to make sure we test with different events. @@ -140,7 +144,9 @@ def test_duplicated_txn_id(self) -> None: # Let's test that calling `persist_event` directly also does the right # thing. - event3, context = self._create_duplicate_event(txn_id) + event3, unpersisted_context = self._create_duplicate_event(txn_id) + context = self.get_success(unpersisted_context.persist(event3)) + self.assertNotEqual(event1.event_id, event3.event_id) ret_event3, event_pos3, _ = self.get_success( @@ -154,7 +160,8 @@ def test_duplicated_txn_id(self) -> None: # Let's test that calling `persist_events` directly also does the right # thing. - event4, context = self._create_duplicate_event(txn_id) + event4, unpersisted_context = self._create_duplicate_event(txn_id) + context = self.get_success(unpersisted_context.persist(event4)) self.assertNotEqual(event1.event_id, event3.event_id) events, _ = self.get_success( @@ -174,8 +181,10 @@ def test_duplicated_txn_id_one_call(self) -> None: txn_id = "something_else_suitably_random" # Create two duplicate events to persist at the same time - event1, context1 = self._create_duplicate_event(txn_id) - event2, context2 = self._create_duplicate_event(txn_id) + event1, unpersisted_context1 = self._create_duplicate_event(txn_id) + context1 = self.get_success(unpersisted_context1.persist(event1)) + event2, unpersisted_context2 = self._create_duplicate_event(txn_id) + context2 = self.get_success(unpersisted_context2.persist(event2)) # Ensure their event IDs are different to start with self.assertNotEqual(event1.event_id, event2.event_id) diff --git a/tests/handlers/test_register.py b/tests/handlers/test_register.py index 1db99b3c0028..aff1ec475884 100644 --- a/tests/handlers/test_register.py +++ b/tests/handlers/test_register.py @@ -507,7 +507,7 @@ def test_auto_create_auto_join_room_preset_invalid_permissions(self) -> None: # Lower the permissions of the inviter. 
event_creation_handler = self.hs.get_event_creation_handler() requester = create_requester(inviter) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_creation_handler.create_event( requester, { @@ -519,6 +519,7 @@ def test_auto_create_auto_join_room_preset_invalid_permissions(self) -> None: }, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( event_creation_handler.handle_new_client_event( requester, events_and_context=[(event, context)] diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index dce6899e787b..1458076a90ba 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -130,7 +130,7 @@ def test_action_for_event_by_user_handles_noninteger_room_power_levels( # Create a new message event, and try to evaluate it under the dodgy # power level event. - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -145,6 +145,7 @@ def test_action_for_event_by_user_handles_noninteger_room_power_levels( prev_event_ids=[pl_event_id], ) ) + context = self.get_success(unpersisted_context.persist(event)) bulk_evaluator = BulkPushRuleEvaluator(self.hs) # should not raise @@ -170,7 +171,7 @@ def test_action_for_event_by_user_disabled_by_config(self) -> None: """Ensure that push rules are not calculated when disabled in the config""" # Create a new message event which should cause a notification. - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -184,6 +185,7 @@ def test_action_for_event_by_user_disabled_by_config(self) -> None: }, ) ) + context = self.get_success(unpersisted_context.persist(event)) bulk_evaluator = BulkPushRuleEvaluator(self.hs) # Mock the method which calculates push rules -- we do this instead of @@ -200,7 +202,7 @@ def _create_and_process( ) -> bool: """Returns true iff the `mentions` trigger an event push action.""" # Create a new message event which should cause a notification. - event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -211,7 +213,7 @@ def _create_and_process( }, ) ) - + context = self.get_success(unpersisted_context.persist(event)) # Execute the push rule machinery. self.get_success(bulk_evaluator.action_for_events_by_user([(event, context)])) @@ -390,7 +392,7 @@ def test_suppress_edits(self) -> None: bulk_evaluator = BulkPushRuleEvaluator(self.hs) # Create & persist an event to use as the parent of the relation. 
- event, context = self.get_success( + event, unpersisted_context = self.get_success( self.event_creation_handler.create_event( self.requester, { @@ -404,6 +406,7 @@ def test_suppress_edits(self) -> None: }, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( self.event_creation_handler.handle_new_client_event( self.requester, events_and_context=[(event, context)] diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 4dd763096d3a..a4900703c415 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -713,7 +713,7 @@ def test_post_room_no_keys(self) -> None: self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(33, channel.resource_usage.db_txn_count) + self.assertEqual(30, channel.resource_usage.db_txn_count) def test_post_room_initial_state(self) -> None: # POST with initial_state config key, expect new room id @@ -726,7 +726,7 @@ def test_post_room_initial_state(self) -> None: self.assertEqual(HTTPStatus.OK, channel.code, channel.result) self.assertTrue("room_id" in channel.json_body) assert channel.resource_usage is not None - self.assertEqual(36, channel.resource_usage.db_txn_count) + self.assertEqual(32, channel.resource_usage.db_txn_count) def test_post_room_visibility_key(self) -> None: # POST with visibility config key, expect new room id diff --git a/tests/storage/test_event_chain.py b/tests/storage/test_event_chain.py index 73d11e7786fb..e39b63edac42 100644 --- a/tests/storage/test_event_chain.py +++ b/tests/storage/test_event_chain.py @@ -522,7 +522,7 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: latest_event_ids = self.get_success( self.store.get_prev_events_for_room(room_id) ) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_handler.create_event( self.requester, { @@ -535,6 +535,7 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: prev_event_ids=latest_event_ids, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( event_handler.handle_new_client_event( self.requester, events_and_context=[(event, context)] @@ -544,7 +545,7 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: assert state_ids1 is not None state1 = set(state_ids1.values()) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_handler.create_event( self.requester, { @@ -557,6 +558,7 @@ def _generate_room(self) -> Tuple[str, List[Set[str]]]: prev_event_ids=latest_event_ids, ) ) + context = self.get_success(unpersisted_context.persist(event)) self.get_success( event_handler.handle_new_client_event( self.requester, events_and_context=[(event, context)] diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index e82c03f5970b..62aed6af0a73 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -496,3 +496,129 @@ def test_get_state_for_event(self) -> None: self.assertEqual(is_all, True) self.assertDictEqual({(e5.type, e5.state_key): e5.event_id}, state_dict) + + def test_batched_state_group_storing(self) -> None: + creation_event = self.inject_state_event( + self.room, self.u_alice, EventTypes.Create, "", {} + ) + state_to_event = self.get_success( + self.storage.state.get_state_groups( + self.room.to_string(), [creation_event.event_id] + ) + ) + current_state_group = list(state_to_event.keys())[0] + + # create some unpersisted 
events and event contexts to store against room + events_and_context = [] + builder = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Name, + "sender": self.u_alice.to_string(), + "state_key": "", + "room_id": self.room.to_string(), + "content": {"name": "first rename of room"}, + }, + ) + + event1, unpersisted_context1 = self.get_success( + self.event_creation_handler.create_new_client_event(builder) + ) + events_and_context.append((event1, unpersisted_context1)) + + builder2 = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.JoinRules, + "sender": self.u_alice.to_string(), + "state_key": "", + "room_id": self.room.to_string(), + "content": {"join_rule": "private"}, + }, + ) + + event2, unpersisted_context2 = self.get_success( + self.event_creation_handler.create_new_client_event(builder2) + ) + events_and_context.append((event2, unpersisted_context2)) + + builder3 = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.Message, + "sender": self.u_alice.to_string(), + "room_id": self.room.to_string(), + "content": {"body": "hello from event 3", "msgtype": "m.text"}, + }, + ) + + event3, unpersisted_context3 = self.get_success( + self.event_creation_handler.create_new_client_event(builder3) + ) + events_and_context.append((event3, unpersisted_context3)) + + builder4 = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": EventTypes.JoinRules, + "sender": self.u_alice.to_string(), + "state_key": "", + "room_id": self.room.to_string(), + "content": {"join_rule": "public"}, + }, + ) + + event4, unpersisted_context4 = self.get_success( + self.event_creation_handler.create_new_client_event(builder4) + ) + events_and_context.append((event4, unpersisted_context4)) + + processed_events_and_context = self.get_success( + self.hs.get_datastores().state.store_state_deltas_for_batched( + events_and_context, self.room.to_string(), current_state_group + ) + ) + + # check that only state events are in state_groups, and all state events are in state_groups + res = self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups", + keyvalues=None, + retcols=("event_id",), + ) + ) + + events = [] + for result in res: + self.assertNotIn(event3.event_id, result) + events.append(result.get("event_id")) + + for event, _ in processed_events_and_context: + if event.is_state(): + self.assertIn(event.event_id, events) + + # check that each unique state has state group in state_groups_state and that the + # type/state key is correct, and check that each state event's state group + # has an entry and prev event in state_group_edges + for event, context in processed_events_and_context: + if event.is_state(): + state = self.get_success( + self.store.db_pool.simple_select_list( + table="state_groups_state", + keyvalues={"state_group": context.state_group_after_event}, + retcols=("type", "state_key"), + ) + ) + self.assertEqual(event.type, state[0].get("type")) + self.assertEqual(event.state_key, state[0].get("state_key")) + + groups = self.get_success( + self.store.db_pool.simple_select_list( + table="state_group_edges", + keyvalues={"state_group": str(context.state_group_after_event)}, + retcols=("*",), + ) + ) + self.assertEqual( + context.state_group_before_event, groups[0].get("prev_state_group") + ) diff --git a/tests/unittest.py b/tests/unittest.py index b21e7f122196..f9160faa1d0c 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -723,7 +723,7 @@ def 
create_and_send_event( event_creator = self.hs.get_event_creation_handler() requester = create_requester(user) - event, context = self.get_success( + event, unpersisted_context = self.get_success( event_creator.create_event( requester, { @@ -735,7 +735,7 @@ def create_and_send_event( prev_event_ids=prev_event_ids, ) ) - + context = self.get_success(unpersisted_context.persist(event)) if soft_failed: event.internal_metadata.soft_failed = True From a74c099ece785256800859abcfc1d1d3fc8f6f53 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:28:57 +0000 Subject: [PATCH 237/344] Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0 (#15152) * Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0 Bumps [dawidd6/action-download-artifact](https://github.com/dawidd6/action-download-artifact) from 2.25.0 to 2.26.0. - [Release notes](https://github.com/dawidd6/action-download-artifact/releases) - [Commits](https://github.com/dawidd6/action-download-artifact/compare/b59d8c6a6c5c6c6437954f470d963c0b20ea7415...5e780fc7bbd0cac69fc73271ed86edf5dcb72d67) --- updated-dependencies: - dependency-name: dawidd6/action-download-artifact dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs-pr-netlify.yaml | 2 +- changelog.d/15152.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15152.misc diff --git a/.github/workflows/docs-pr-netlify.yaml b/.github/workflows/docs-pr-netlify.yaml index 1704b3ce93ac..a5e74eb297db 100644 --- a/.github/workflows/docs-pr-netlify.yaml +++ b/.github/workflows/docs-pr-netlify.yaml @@ -14,7 +14,7 @@ jobs: # There's a 'download artifact' action, but it hasn't been updated for the workflow_run action # (https://github.com/actions/download-artifact/issues/60) so instead we get this mess: - name: 📥 Download artifact - uses: dawidd6/action-download-artifact@b59d8c6a6c5c6c6437954f470d963c0b20ea7415 # v2.25.0 + uses: dawidd6/action-download-artifact@5e780fc7bbd0cac69fc73271ed86edf5dcb72d67 # v2.26.0 with: workflow: docs-pr.yaml run_id: ${{ github.event.workflow_run.id }} diff --git a/changelog.d/15152.misc b/changelog.d/15152.misc new file mode 100644 index 000000000000..6b2c73d0ab31 --- /dev/null +++ b/changelog.d/15152.misc @@ -0,0 +1 @@ +Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. From 1ff2d20a6f25fb02a17ca808390e5f4a7e94c1fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:29:51 +0000 Subject: [PATCH 238/344] Bump docker/login-action from 1 to 2 (#15154) * Bump docker/login-action from 1 to 2 Bumps [docker/login-action](https://github.com/docker/login-action) from 1 to 2. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/v1...v2) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/push_complement_image.yml | 2 +- changelog.d/15154.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15154.misc diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index f26143de6bbf..b76c4cb32382 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -48,7 +48,7 @@ jobs: with: ref: master - name: Login to registry - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} diff --git a/changelog.d/15154.misc b/changelog.d/15154.misc new file mode 100644 index 000000000000..c958b520780a --- /dev/null +++ b/changelog.d/15154.misc @@ -0,0 +1 @@ +Bump docker/login-action from 1 to 2. From 965956160aca51e30f58dd7e362c909279db2a50 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:32:52 +0000 Subject: [PATCH 239/344] Bump actions/checkout from 2 to 3 (#15155) * Bump actions/checkout from 2 to 3 Bumps [actions/checkout](https://github.com/actions/checkout) from 2 to 3. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v2...v3) --- updated-dependencies: - dependency-name: actions/checkout dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/docs-pr.yaml | 4 ++-- changelog.d/15155.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15155.misc diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index d41f6c449055..6634f2644ee8 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -12,7 +12,7 @@ jobs: name: GitHub Pages runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 @@ -39,7 +39,7 @@ jobs: name: Check links in documentation runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup mdbook uses: peaceiris/actions-mdbook@adeb05db28a0c0004681db83893d56c0388ea9ea # v1.2.0 diff --git a/changelog.d/15155.misc b/changelog.d/15155.misc new file mode 100644 index 000000000000..40c73e96ec33 --- /dev/null +++ b/changelog.d/15155.misc @@ -0,0 +1 @@ +Bump actions/checkout from 2 to 3. From 81a0dc35f7c9b0c5368b77d3510ea7c50bb222b0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:33:27 +0000 Subject: [PATCH 240/344] Bump matrix-org/backend-meta from 1 to 2 (#15156) * Bump matrix-org/backend-meta from 1 to 2 Bumps [matrix-org/backend-meta](https://github.com/matrix-org/backend-meta) from 1 to 2. 
- [Release notes](https://github.com/matrix-org/backend-meta/releases) - [Commits](https://github.com/matrix-org/backend-meta/compare/v1...v2) --- updated-dependencies: - dependency-name: matrix-org/backend-meta dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- .github/workflows/triage-incoming.yml | 2 +- changelog.d/15156.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15156.misc diff --git a/.github/workflows/triage-incoming.yml b/.github/workflows/triage-incoming.yml index 0f0397cf5bc6..24dac47bf247 100644 --- a/.github/workflows/triage-incoming.yml +++ b/.github/workflows/triage-incoming.yml @@ -6,7 +6,7 @@ on: jobs: triage: - uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v1 + uses: matrix-org/backend-meta/.github/workflows/triage-incoming.yml@v2 with: project_id: 'PVT_kwDOAIB0Bs4AFDdZ' content_id: ${{ github.event.issue.node_id }} diff --git a/changelog.d/15156.misc b/changelog.d/15156.misc new file mode 100644 index 000000000000..ebae4cb45698 --- /dev/null +++ b/changelog.d/15156.misc @@ -0,0 +1 @@ +Bump matrix-org/backend-meta from 1 to 2. From 229ae5bcec8065485eb0049f8ac8b52f181c3410 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:34:57 +0000 Subject: [PATCH 241/344] Bump typing-extensions from 4.4.0 to 4.5.0 (#15157) * Bump typing-extensions from 4.4.0 to 4.5.0 Bumps [typing-extensions](https://github.com/python/typing_extensions) from 4.4.0 to 4.5.0. - [Release notes](https://github.com/python/typing_extensions/releases) - [Changelog](https://github.com/python/typing_extensions/blob/main/CHANGELOG.md) - [Commits](https://github.com/python/typing_extensions/compare/4.4.0...4.5.0) --- updated-dependencies: - dependency-name: typing-extensions dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15157.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15157.misc diff --git a/changelog.d/15157.misc b/changelog.d/15157.misc new file mode 100644 index 000000000000..730b706dfef4 --- /dev/null +++ b/changelog.d/15157.misc @@ -0,0 +1 @@ +Bump typing-extensions from 4.4.0 to 4.5.0. 
diff --git a/poetry.lock b/poetry.lock index 8ffdab7a222f..0764f366adaa 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2755,14 +2755,14 @@ files = [ [[package]] name = "typing-extensions" -version = "4.4.0" +version = "4.5.0" description = "Backported and Experimental Type Hints for Python 3.7+" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, - {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, + {file = "typing_extensions-4.5.0-py3-none-any.whl", hash = "sha256:fb33085c39dd998ac16d1431ebc293a8b3eedd00fd4a32de0ff79002c19511b4"}, + {file = "typing_extensions-4.5.0.tar.gz", hash = "sha256:5cb5f4a79139d699607b3ef622a1dedafa84e115ab0024e0d9c044a9479ca7cb"}, ] [[package]] From 80884579f5a15b0a342cc80d19677a0d72d5543a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:37:33 +0000 Subject: [PATCH 242/344] Bump types-opentracing from 2.4.10.1 to 2.4.10.3 (#15158) * Bump types-opentracing from 2.4.10.1 to 2.4.10.3 Bumps [types-opentracing](https://github.com/python/typeshed) from 2.4.10.1 to 2.4.10.3. - [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-opentracing dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15158.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15158.misc diff --git a/changelog.d/15158.misc b/changelog.d/15158.misc new file mode 100644 index 000000000000..fc0eecfd2145 --- /dev/null +++ b/changelog.d/15158.misc @@ -0,0 +1 @@ +Bump types-opentracing from 2.4.10.1 to 2.4.10.3. diff --git a/poetry.lock b/poetry.lock index 0764f366adaa..f6b98593e06e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2650,14 +2650,14 @@ files = [ [[package]] name = "types-opentracing" -version = "2.4.10.1" +version = "2.4.10.3" description = "Typing stubs for opentracing" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-opentracing-2.4.10.1.tar.gz", hash = "sha256:49e7e52b8b6e221865a9201fc8c2df0bcda8e7098d4ebb35903dbfa4b4d29195"}, - {file = "types_opentracing-2.4.10.1-py3-none-any.whl", hash = "sha256:eb63394acd793e7d9e327956242349fee14580a87c025408dc268d4dd883cc24"}, + {file = "types-opentracing-2.4.10.3.tar.gz", hash = "sha256:b277f114265b41216714f9c77dffcab57038f1730fd141e2c55c5c9f6f2caa87"}, + {file = "types_opentracing-2.4.10.3-py3-none-any.whl", hash = "sha256:60244d718fcd9de7043645ecaf597222d550432507098ab2e6268f7b589a7fa7"}, ] [[package]] From d3afe59d5abf0293d5300db07483c09b45681e81 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:38:10 +0000 Subject: [PATCH 243/344] Bump types-setuptools from 67.3.0.1 to 67.4.0.3 (#15160) * Bump types-setuptools from 67.3.0.1 to 67.4.0.3 Bumps [types-setuptools](https://github.com/python/typeshed) from 67.3.0.1 to 67.4.0.3. 
- [Release notes](https://github.com/python/typeshed/releases) - [Commits](https://github.com/python/typeshed/commits) --- updated-dependencies: - dependency-name: types-setuptools dependency-type: direct:development update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15160.misc | 1 + poetry.lock | 21 +++------------------ 2 files changed, 4 insertions(+), 18 deletions(-) create mode 100644 changelog.d/15160.misc diff --git a/changelog.d/15160.misc b/changelog.d/15160.misc new file mode 100644 index 000000000000..13b098d17c51 --- /dev/null +++ b/changelog.d/15160.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.3.0.1 to 67.4.0.3. diff --git a/poetry.lock b/poetry.lock index f6b98593e06e..03ca28d9f54a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2600,18 +2600,6 @@ files = [ types-enum34 = "*" types-ipaddress = "*" -[[package]] -name = "types-docutils" -version = "0.19.1.1" -description = "Typing stubs for docutils" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "types-docutils-0.19.1.1.tar.gz", hash = "sha256:be0a51ba1c7dd215d9d2df66d6845e63c1009b4bbf4c5beb87a0d9745cdba962"}, - {file = "types_docutils-0.19.1.1-py3-none-any.whl", hash = "sha256:a024cada35f0c13cc45eb0b68a102719018a634013690b7fef723bcbfadbd1f1"}, -] - [[package]] name = "types-enum34" version = "1.1.8" @@ -2728,19 +2716,16 @@ types-urllib3 = "<1.27" [[package]] name = "types-setuptools" -version = "67.3.0.1" +version = "67.4.0.3" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.3.0.1.tar.gz", hash = "sha256:1a26d373036c720e566823b6edd664a2db4d138b6eeba856721ec1254203474f"}, - {file = "types_setuptools-67.3.0.1-py3-none-any.whl", hash = "sha256:a7e0f0816b5b449f5bcdc0efa43da91ff81dbe6941f293a6490d68a450e130a1"}, + {file = "types-setuptools-67.4.0.3.tar.gz", hash = "sha256:19e958dfdbf1c5a628e54c2a7ee84935051afb7278d0c1cdb08ac194757ee3b1"}, + {file = "types_setuptools-67.4.0.3-py3-none-any.whl", hash = "sha256:3c83c3a6363dd3ddcdd054796705605f0fa8b8e5a39390e07a05e5f7af054978"}, ] -[package.dependencies] -types-docutils = "*" - [[package]] name = "types-urllib3" version = "1.26.10" From f7e49afb99127a086fd9f0fd11278f4d7a43d98e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 27 Feb 2023 11:39:26 +0000 Subject: [PATCH 244/344] Bump ruff from 0.0.237 to 0.0.252 (#15159) * Bump ruff from 0.0.237 to 0.0.252 Bumps [ruff](https://github.com/charliermarsh/ruff) from 0.0.237 to 0.0.252. - [Release notes](https://github.com/charliermarsh/ruff/releases) - [Changelog](https://github.com/charliermarsh/ruff/blob/main/BREAKING_CHANGES.md) - [Commits](https://github.com/charliermarsh/ruff/compare/v0.0.237...v0.0.252) --- updated-dependencies: - dependency-name: ruff dependency-type: direct:development update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15159.misc | 1 + poetry.lock | 37 +++++++++++++++++++------------------ pyproject.toml | 2 +- 3 files changed, 21 insertions(+), 19 deletions(-) create mode 100644 changelog.d/15159.misc diff --git a/changelog.d/15159.misc b/changelog.d/15159.misc new file mode 100644 index 000000000000..ebb857a89ef5 --- /dev/null +++ b/changelog.d/15159.misc @@ -0,0 +1 @@ +Bump ruff from 0.0.237 to 0.0.252. diff --git a/poetry.lock b/poetry.lock index 03ca28d9f54a..cd3dc6fdcd7e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1985,28 +1985,29 @@ jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] [[package]] name = "ruff" -version = "0.0.237" +version = "0.0.252" description = "An extremely fast Python linter, written in Rust." category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.0.237-py3-none-macosx_10_7_x86_64.whl", hash = "sha256:2ea04d826ffca58a7ae926115a801960c757d53c9027f2ca9acbe84c9f2b2f04"}, - {file = "ruff-0.0.237-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:8ed113937fab9f73f8c1a6c0350bb4fe03e951370139c6e0adb81f48a8dcf4c6"}, - {file = "ruff-0.0.237-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9bcb71a3efb5fe886eb48d739cfae5df4a15617e7b5a7668aa45ebf74c0d3fa"}, - {file = "ruff-0.0.237-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:80ce10718abbf502818c0d650ebab99fdcef5e937a1ded3884493ddff804373c"}, - {file = "ruff-0.0.237-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0cc6cb7c1efcc260df5a939435649610a28f9f438b8b313384c8985ac6574f9f"}, - {file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7eef0c7a1e45a4e30328ae101613575944cbf47a3a11494bf9827722da6c66b3"}, - {file = "ruff-0.0.237-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0d122433a21ce4a21fbba34b73fc3add0ccddd1643b3ff5abb8d2767952f872e"}, - {file = "ruff-0.0.237-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b76311335adda4de3c1d471e64e89a49abfeebf02647e3db064e7740e7f36ed6"}, - {file = "ruff-0.0.237-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46c5977b643aaf2b6f84641265f835b6c7f67fcca38dbae08c4f15602e084ca0"}, - {file = "ruff-0.0.237-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3d6ed86d0d4d742360a262d52191581f12b669a68e59ae3b52e80d7483b3d7b3"}, - {file = "ruff-0.0.237-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fedfb60f986c26cdb1809db02866e68508db99910c587d2c4066a5c07aa85593"}, - {file = "ruff-0.0.237-py3-none-musllinux_1_2_i686.whl", hash = "sha256:bb96796be5919871fa9ae7e88968ba9e14306d9a3f217ca6c204f68a5abeccdd"}, - {file = "ruff-0.0.237-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:ea239cfedf67b74ea4952e1074bb99a4281c2145441d70bc7e2f058d5c49f1c9"}, - {file = "ruff-0.0.237-py3-none-win32.whl", hash = "sha256:8d6a1d21ae15da2b1dcffeee2606e90de0e6717e72957da7d16ab6ae18dd0058"}, - {file = "ruff-0.0.237-py3-none-win_amd64.whl", hash = "sha256:525e5ec81cee29b993f77976026a6bf44528a14aa6edb1ef47bd8079147395ae"}, - {file = "ruff-0.0.237.tar.gz", hash = "sha256:630c575f543733adf6c19a11d9a02ca9ecc364bd7140af8a4c854d4728be6b56"}, + {file = "ruff-0.0.252-py3-none-macosx_10_7_x86_64.whl", hash = 
"sha256:349367a227c4db7abbc3a9993efea8a608b5bea4bb4a1e5fc6f0d56819524f92"}, + {file = "ruff-0.0.252-py3-none-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", hash = "sha256:ce77f9106d96b4faf7865860fb5155b9deaf6f699d9c279118c5ad947739ecaf"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:edadb0b050293b4e60dab979ba6a4e734d9c899cbe316a0ee5b65e3cdd39c750"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4efdae98937d1e4d23ab0b7fc7e8e6b6836cc7d2d42238ceeacbc793ef780542"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8546d879f7d3f669379a03e7b103d90e11901976ab508aeda59c03dfd8a359e"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:83fdc7169b6c1fb5fe8d1cdf345697f558c1b433ef97df9ca11defa2a8f3ee9e"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84ed9be1a17e2a556a571a5b959398633dd10910abd8dcf8b098061e746e892d"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f5e77bd9ba4438cf2ee32154e2673afe22f538ef29f5d65ca47e3dc46c42cf8"}, + {file = "ruff-0.0.252-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5179b94b45c0f8512eaff3ab304c14714a46df2e9ca72a9d96084adc376b71"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:92efd8a71157595df5bc46aaaa0613d8a2fbc5cddc53ae7b749c16025c324732"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:fd350fc10832cfd28e681d829a8aa83ea3e653326e0ea9d98637dfb8d46177d2"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_i686.whl", hash = "sha256:f119240c9631216e846166e06023b1d878e25fbac93bf20da50069e91cfbfaee"}, + {file = "ruff-0.0.252-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5c5a49f89f5ede93d16eddfeeadd7e5739ec703e8f63ac95eac30236b9e49da3"}, + {file = "ruff-0.0.252-py3-none-win32.whl", hash = "sha256:89a897dc743f2fe063483ea666097e72e848f4bbe40493fe0533e61799959f6e"}, + {file = "ruff-0.0.252-py3-none-win_amd64.whl", hash = "sha256:cdc89ad6ff88519b1fb1816ac82a9ad910762c90ff5fd64dda7691b72d36aff7"}, + {file = "ruff-0.0.252-py3-none-win_arm64.whl", hash = "sha256:4b594a17cf53077165429486650658a0e1b2ac6ab88954f5afd50d2b1b5657a9"}, + {file = "ruff-0.0.252.tar.gz", hash = "sha256:6992611ab7bdbe7204e4831c95ddd3febfeece2e6f5e44bbed044454c7db0f63"}, ] [[package]] @@ -3029,4 +3030,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "e12077711e5ff83f3c6038ea44c37bd49773799ec8245035b01094b7800c5c92" +content-hash = "7bcffef7b6e6d4b1113222e2ca152b3798c997872789c8a1ea01238f199d56fe" diff --git a/pyproject.toml b/pyproject.toml index cef7d295c131..a48b35fa6397 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -313,7 +313,7 @@ all = [ # We pin black so that our tests don't start failing on new releases. 
isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.237" +ruff = "0.0.252" # Typechecking mypy = "*" From 3f2ef205e228282a8a744db59115caa4b17da9a1 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 27 Feb 2023 13:03:22 +0000 Subject: [PATCH 245/344] Small fixes to `MatrixFederationHttpClient` docstrings (#15148) --- changelog.d/15148.doc | 1 + synapse/http/matrixfederationclient.py | 16 +++++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) create mode 100644 changelog.d/15148.doc diff --git a/changelog.d/15148.doc b/changelog.d/15148.doc new file mode 100644 index 000000000000..4e9e16330651 --- /dev/null +++ b/changelog.d/15148.doc @@ -0,0 +1 @@ +Correct small documentation errors in some `MatrixFederationHttpClient` methods. \ No newline at end of file diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 312aab4dcc7b..3302d4e48a0f 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -440,7 +440,7 @@ async def _send_request( Args: request: details of request to be sent - retry_on_dns_fail: true if the request should be retied on DNS failures + retry_on_dns_fail: true if the request should be retried on DNS failures timeout: number of milliseconds to wait for the response headers (including connecting to the server), *for each attempt*. @@ -475,7 +475,7 @@ async def _send_request( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -871,7 +871,7 @@ async def put_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -958,7 +958,7 @@ async def post_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -1036,6 +1036,8 @@ async def get_json( args: A dictionary used to create query strings, defaults to None. + retry_on_dns_fail: true if the request should be retried on DNS failures + timeout: number of milliseconds to wait for the response. self._default_timeout (60s) by default. @@ -1063,7 +1065,7 @@ async def get_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -1141,7 +1143,7 @@ async def delete_json( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. 
- FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. @@ -1197,7 +1199,7 @@ async def get_file( (except 429). NotRetryingDestination: If we are not yet ready to retry this server. - FederationDeniedError: If this destination is not on our + FederationDeniedError: If this destination is not on our federation whitelist RequestSendFailed: If there were problems connecting to the remote, due to e.g. DNS failures, connection timeouts etc. From 4fc8875876374ec8f97a3b3cc344a4e3abcf769f Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 27 Feb 2023 08:26:05 -0500 Subject: [PATCH 246/344] Refactor media modules. (#15146) * Removes the `v1` directory from `test.rest.media.v1`. * Moves the non-REST code from `synapse.rest.media.v1` to `synapse.media`. * Flatten the `v1` directory from `synapse.rest.media`, but leave compatiblity with 3rd party media repositories and spam checkers. --- changelog.d/15146.misc | 1 + .../move_remote_media_to_new_store.py | 2 +- synapse/config/repository.py | 12 +- synapse/events/spamcheck.py | 4 +- synapse/media/_base.py | 479 ++++++++++++++++++ synapse/{rest/media/v1 => media}/filepath.py | 0 .../media/v1 => media}/media_repository.py | 94 +--- synapse/media/media_storage.py | 374 ++++++++++++++ synapse/{rest/media/v1 => media}/oembed.py | 2 +- .../{rest/media/v1 => media}/preview_html.py | 0 synapse/media/storage_provider.py | 181 +++++++ .../{rest/media/v1 => media}/thumbnailer.py | 0 .../rest/media/{v1 => }/config_resource.py | 0 .../rest/media/{v1 => }/download_resource.py | 5 +- .../rest/media/media_repository_resource.py | 93 ++++ .../media/{v1 => }/preview_url_resource.py | 12 +- .../rest/media/{v1 => }/thumbnail_resource.py | 7 +- .../rest/media/{v1 => }/upload_resource.py | 4 +- synapse/rest/media/v1/_base.py | 470 +---------------- synapse/rest/media/v1/media_storage.py | 365 +------------ synapse/rest/media/v1/storage_provider.py | 172 +------ synapse/server.py | 6 +- tests/{rest/media/v1 => media}/__init__.py | 2 +- tests/{rest/media/v1 => media}/test_base.py | 2 +- .../{rest/media/v1 => media}/test_filepath.py | 2 +- .../media/v1 => media}/test_html_preview.py | 2 +- .../media/v1 => media}/test_media_storage.py | 10 +- tests/{rest/media/v1 => media}/test_oembed.py | 2 +- tests/rest/admin/test_media.py | 2 +- tests/rest/admin/test_user.py | 2 +- tests/rest/media/{v1 => }/test_url_preview.py | 6 +- 31 files changed, 1190 insertions(+), 1123 deletions(-) create mode 100644 changelog.d/15146.misc create mode 100644 synapse/media/_base.py rename synapse/{rest/media/v1 => media}/filepath.py (100%) rename synapse/{rest/media/v1 => media}/media_repository.py (92%) create mode 100644 synapse/media/media_storage.py rename synapse/{rest/media/v1 => media}/oembed.py (99%) rename synapse/{rest/media/v1 => media}/preview_html.py (100%) create mode 100644 synapse/media/storage_provider.py rename synapse/{rest/media/v1 => media}/thumbnailer.py (100%) rename synapse/rest/media/{v1 => }/config_resource.py (100%) rename synapse/rest/media/{v1 => }/download_resource.py (95%) create mode 100644 synapse/rest/media/media_repository_resource.py rename synapse/rest/media/{v1 => }/preview_url_resource.py (98%) rename synapse/rest/media/{v1 => }/thumbnail_resource.py (99%) rename synapse/rest/media/{v1 => }/upload_resource.py (96%) rename tests/{rest/media/v1 => 
media}/__init__.py (91%) rename tests/{rest/media/v1 => media}/test_base.py (95%) rename tests/{rest/media/v1 => media}/test_filepath.py (99%) rename tests/{rest/media/v1 => media}/test_html_preview.py (99%) rename tests/{rest/media/v1 => media}/test_media_storage.py (98%) rename tests/{rest/media/v1 => media}/test_oembed.py (98%) rename tests/rest/media/{v1 => }/test_url_preview.py (99%) diff --git a/changelog.d/15146.misc b/changelog.d/15146.misc new file mode 100644 index 000000000000..8de5f952398f --- /dev/null +++ b/changelog.d/15146.misc @@ -0,0 +1 @@ +Refactor the media modules. diff --git a/synapse/_scripts/move_remote_media_to_new_store.py b/synapse/_scripts/move_remote_media_to_new_store.py index 819afaaca6db..0dd36bee20ad 100755 --- a/synapse/_scripts/move_remote_media_to_new_store.py +++ b/synapse/_scripts/move_remote_media_to_new_store.py @@ -37,7 +37,7 @@ import shutil import sys -from synapse.rest.media.v1.filepath import MediaFilePaths +from synapse.media.filepath import MediaFilePaths logger = logging.getLogger() diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 2da40c09f0e1..ecb3edbe3ade 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -178,11 +178,13 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: for i, provider_config in enumerate(storage_providers): # We special case the module "file_system" so as not to need to # expose FileStorageProviderBackend - if provider_config["module"] == "file_system": - provider_config["module"] = ( - "synapse.rest.media.v1.storage_provider" - ".FileStorageProviderBackend" - ) + if ( + provider_config["module"] == "file_system" + or provider_config["module"] == "synapse.rest.media.v1.storage_provider" + ): + provider_config[ + "module" + ] = "synapse.media.storage_provider.FileStorageProviderBackend" provider_class, parsed_config = load_module( provider_config, ("media_storage_providers", "" % i) diff --git a/synapse/events/spamcheck.py b/synapse/events/spamcheck.py index 623a2c71eadb..765c15bb5135 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/events/spamcheck.py @@ -33,8 +33,8 @@ import synapse from synapse.api.errors import Codes from synapse.logging.opentracing import trace -from synapse.rest.media.v1._base import FileInfo -from synapse.rest.media.v1.media_storage import ReadableFileWrapper +from synapse.media._base import FileInfo +from synapse.media.media_storage import ReadableFileWrapper from synapse.spam_checker_api import RegistrationBehaviour from synapse.types import JsonDict, RoomAlias, UserProfile from synapse.util.async_helpers import delay_cancellation, maybe_awaitable diff --git a/synapse/media/_base.py b/synapse/media/_base.py new file mode 100644 index 000000000000..ef8334ae2586 --- /dev/null +++ b/synapse/media/_base.py @@ -0,0 +1,479 @@ +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2019-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import logging +import os +import urllib +from abc import ABC, abstractmethod +from types import TracebackType +from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type + +import attr + +from twisted.internet.interfaces import IConsumer +from twisted.protocols.basic import FileSender +from twisted.web.server import Request + +from synapse.api.errors import Codes, SynapseError, cs_error +from synapse.http.server import finish_request, respond_with_json +from synapse.http.site import SynapseRequest +from synapse.logging.context import make_deferred_yieldable +from synapse.util.stringutils import is_ascii, parse_and_validate_server_name + +logger = logging.getLogger(__name__) + +# list all text content types that will have the charset default to UTF-8 when +# none is given +TEXT_CONTENT_TYPES = [ + "text/css", + "text/csv", + "text/html", + "text/calendar", + "text/plain", + "text/javascript", + "application/json", + "application/ld+json", + "application/rtf", + "image/svg+xml", + "text/xml", +] + + +def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: + """Parses the server name, media ID and optional file name from the request URI + + Also performs some rough validation on the server name. + + Args: + request: The `Request`. + + Returns: + A tuple containing the parsed server name, media ID and optional file name. + + Raises: + SynapseError(404): if parsing or validation fail for any reason + """ + try: + # The type on postpath seems incorrect in Twisted 21.2.0. + postpath: List[bytes] = request.postpath # type: ignore + assert postpath + + # This allows users to append e.g. /test.png to the URL. Useful for + # clients that parse the URL to see content type. + server_name_bytes, media_id_bytes = postpath[:2] + server_name = server_name_bytes.decode("utf-8") + media_id = media_id_bytes.decode("utf8") + + # Validate the server name, raising if invalid + parse_and_validate_server_name(server_name) + + file_name = None + if len(postpath) > 2: + try: + file_name = urllib.parse.unquote(postpath[-1].decode("utf-8")) + except UnicodeDecodeError: + pass + return server_name, media_id, file_name + except Exception: + raise SynapseError( + 404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN + ) + + +def respond_404(request: SynapseRequest) -> None: + respond_with_json( + request, + 404, + cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND), + send_cors=True, + ) + + +async def respond_with_file( + request: SynapseRequest, + media_type: str, + file_path: str, + file_size: Optional[int] = None, + upload_name: Optional[str] = None, +) -> None: + logger.debug("Responding with %r", file_path) + + if os.path.isfile(file_path): + if file_size is None: + stat = os.stat(file_path) + file_size = stat.st_size + + add_file_headers(request, media_type, file_size, upload_name) + + with open(file_path, "rb") as f: + await make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) + + finish_request(request) + else: + respond_404(request) + + +def add_file_headers( + request: Request, + media_type: str, + file_size: Optional[int], + upload_name: Optional[str], +) -> None: + """Adds the correct response headers in preparation for responding with the + media. + + Args: + request + media_type: The media/content type. + file_size: Size in bytes of the media, if known. + upload_name: The name of the requested file, if any. 
+ """ + + def _quote(x: str) -> str: + return urllib.parse.quote(x.encode("utf-8")) + + # Default to a UTF-8 charset for text content types. + # ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16' + if media_type.lower() in TEXT_CONTENT_TYPES: + content_type = media_type + "; charset=UTF-8" + else: + content_type = media_type + + request.setHeader(b"Content-Type", content_type.encode("UTF-8")) + if upload_name: + # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. + # + # `filename` is defined to be a `value`, which is defined by RFC2616 + # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token` + # is (essentially) a single US-ASCII word, and a `quoted-string` is a + # US-ASCII string surrounded by double-quotes, using backslash as an + # escape character. Note that %-encoding is *not* permitted. + # + # `filename*` is defined to be an `ext-value`, which is defined in + # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`, + # where `value-chars` is essentially a %-encoded string in the given charset. + # + # [1]: https://tools.ietf.org/html/rfc6266#section-4.1 + # [2]: https://tools.ietf.org/html/rfc2616#section-3.6 + # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1 + + # We avoid the quoted-string version of `filename`, because (a) synapse didn't + # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we + # may as well just do the filename* version. + if _can_encode_filename_as_token(upload_name): + disposition = "inline; filename=%s" % (upload_name,) + else: + disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),) + + request.setHeader(b"Content-Disposition", disposition.encode("ascii")) + + # cache for at least a day. + # XXX: we might want to turn this off for data we don't want to + # recommend caching as it's sensitive or private - or at least + # select private. don't bother setting Expires as all our + # clients are smart enough to be happy with Cache-Control + request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400") + if file_size is not None: + request.setHeader(b"Content-Length", b"%d" % (file_size,)) + + # Tell web crawlers to not index, archive, or follow links in media. This + # should help to prevent things in the media repo from showing up in web + # search results. + request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex") + + +# separators as defined in RFC2616. SP and HT are handled separately. +# see _can_encode_filename_as_token. +_FILENAME_SEPARATOR_CHARS = { + "(", + ")", + "<", + ">", + "@", + ",", + ";", + ":", + "\\", + '"', + "/", + "[", + "]", + "?", + "=", + "{", + "}", +} + + +def _can_encode_filename_as_token(x: str) -> bool: + for c in x: + # from RFC2616: + # + # token = 1* + # + # separators = "(" | ")" | "<" | ">" | "@" + # | "," | ";" | ":" | "\" | <"> + # | "/" | "[" | "]" | "?" | "=" + # | "{" | "}" | SP | HT + # + # CHAR = + # + # CTL = + # + if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS: + return False + return True + + +async def respond_with_responder( + request: SynapseRequest, + responder: "Optional[Responder]", + media_type: str, + file_size: Optional[int], + upload_name: Optional[str] = None, +) -> None: + """Responds to the request with given responder. If responder is None then + returns 404. + + Args: + request + responder + media_type: The media/content type. + file_size: Size in bytes of the media. 
If not known it should be None + upload_name: The name of the requested file, if any. + """ + if not responder: + respond_404(request) + return + + # If we have a responder we *must* use it as a context manager. + with responder: + if request._disconnected: + logger.warning( + "Not sending response to request %s, already disconnected.", request + ) + return + + logger.debug("Responding to media request with responder %s", responder) + add_file_headers(request, media_type, file_size, upload_name) + try: + await responder.write_to_consumer(request) + except Exception as e: + # The majority of the time this will be due to the client having gone + # away. Unfortunately, Twisted simply throws a generic exception at us + # in that case. + logger.warning("Failed to write to consumer: %s %s", type(e), e) + + # Unregister the producer, if it has one, so Twisted doesn't complain + if request.producer: + request.unregisterProducer() + + finish_request(request) + + +class Responder(ABC): + """Represents a response that can be streamed to the requester. + + Responder is a context manager which *must* be used, so that any resources + held can be cleaned up. + """ + + @abstractmethod + def write_to_consumer(self, consumer: IConsumer) -> Awaitable: + """Stream response into consumer + + Args: + consumer: The consumer to stream into. + + Returns: + Resolves once the response has finished being written + """ + raise NotImplementedError() + + def __enter__(self) -> None: # noqa: B027 + pass + + def __exit__( # noqa: B027 + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + pass + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class ThumbnailInfo: + """Details about a generated thumbnail.""" + + width: int + height: int + method: str + # Content type of thumbnail, e.g. image/png + type: str + # The size of the media file, in bytes. + length: Optional[int] = None + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class FileInfo: + """Details about a requested/uploaded file.""" + + # The server name where the media originated from, or None if local. + server_name: Optional[str] + # The local ID of the file. For local files this is the same as the media_id + file_id: str + # If the file is for the url preview cache + url_cache: bool = False + # Whether the file is a thumbnail or not. + thumbnail: Optional[ThumbnailInfo] = None + + # The below properties exist to maintain compatibility with third-party modules. + @property + def thumbnail_width(self) -> Optional[int]: + if not self.thumbnail: + return None + return self.thumbnail.width + + @property + def thumbnail_height(self) -> Optional[int]: + if not self.thumbnail: + return None + return self.thumbnail.height + + @property + def thumbnail_method(self) -> Optional[str]: + if not self.thumbnail: + return None + return self.thumbnail.method + + @property + def thumbnail_type(self) -> Optional[str]: + if not self.thumbnail: + return None + return self.thumbnail.type + + @property + def thumbnail_length(self) -> Optional[int]: + if not self.thumbnail: + return None + return self.thumbnail.length + + +def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]: + """ + Get the filename of the downloaded file by inspecting the + Content-Disposition HTTP header. + + Args: + headers: The HTTP request headers. + + Returns: + The filename, or None. + """ + content_disposition = headers.get(b"Content-Disposition", [b""]) + + # No header, bail out. 
+ if not content_disposition[0]: + return None + + _, params = _parse_header(content_disposition[0]) + + upload_name = None + + # First check if there is a valid UTF-8 filename + upload_name_utf8 = params.get(b"filename*", None) + if upload_name_utf8: + if upload_name_utf8.lower().startswith(b"utf-8''"): + upload_name_utf8 = upload_name_utf8[7:] + # We have a filename*= section. This MUST be ASCII, and any UTF-8 + # bytes are %-quoted. + try: + # Once it is decoded, we can then unquote the %-encoded + # parts strictly into a unicode string. + upload_name = urllib.parse.unquote( + upload_name_utf8.decode("ascii"), errors="strict" + ) + except UnicodeDecodeError: + # Incorrect UTF-8. + pass + + # If there isn't check for an ascii name. + if not upload_name: + upload_name_ascii = params.get(b"filename", None) + if upload_name_ascii and is_ascii(upload_name_ascii): + upload_name = upload_name_ascii.decode("ascii") + + # This may be None here, indicating we did not find a matching name. + return upload_name + + +def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]: + """Parse a Content-type like header. + + Cargo-culted from `cgi`, but works on bytes rather than strings. + + Args: + line: header to be parsed + + Returns: + The main content-type, followed by the parameter dictionary + """ + parts = _parseparam(b";" + line) + key = next(parts) + pdict = {} + for p in parts: + i = p.find(b"=") + if i >= 0: + name = p[:i].strip().lower() + value = p[i + 1 :].strip() + + # strip double-quotes + if len(value) >= 2 and value[0:1] == value[-1:] == b'"': + value = value[1:-1] + value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"') + pdict[name] = value + + return key, pdict + + +def _parseparam(s: bytes) -> Generator[bytes, None, None]: + """Generator which splits the input on ;, respecting double-quoted sequences + + Cargo-culted from `cgi`, but works on bytes rather than strings. 
+ + Args: + s: header to be parsed + + Returns: + The split input + """ + while s[:1] == b";": + s = s[1:] + + # look for the next ; + end = s.find(b";") + + # if there is an odd number of " marks between here and the next ;, skip to the + # next ; instead + while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2: + end = s.find(b";", end + 1) + + if end < 0: + end = len(s) + f = s[:end] + yield f.strip() + s = s[end:] diff --git a/synapse/rest/media/v1/filepath.py b/synapse/media/filepath.py similarity index 100% rename from synapse/rest/media/v1/filepath.py rename to synapse/media/filepath.py diff --git a/synapse/rest/media/v1/media_repository.py b/synapse/media/media_repository.py similarity index 92% rename from synapse/rest/media/v1/media_repository.py rename to synapse/media/media_repository.py index c70e1837afd0..b81e3c2b0c60 100644 --- a/synapse/rest/media/v1/media_repository.py +++ b/synapse/media/media_repository.py @@ -32,18 +32,10 @@ RequestSendFailed, SynapseError, ) -from synapse.config._base import ConfigError from synapse.config.repository import ThumbnailRequirement -from synapse.http.server import UnrecognizedRequestResource from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import UserID -from synapse.util.async_helpers import Linearizer -from synapse.util.retryutils import NotRetryingDestination -from synapse.util.stringutils import random_string - -from ._base import ( +from synapse.media._base import ( FileInfo, Responder, ThumbnailInfo, @@ -51,15 +43,15 @@ respond_404, respond_with_responder, ) -from .config_resource import MediaConfigResource -from .download_resource import DownloadResource -from .filepath import MediaFilePaths -from .media_storage import MediaStorage -from .preview_url_resource import PreviewUrlResource -from .storage_provider import StorageProviderWrapper -from .thumbnail_resource import ThumbnailResource -from .thumbnailer import Thumbnailer, ThumbnailError -from .upload_resource import UploadResource +from synapse.media.filepath import MediaFilePaths +from synapse.media.media_storage import MediaStorage +from synapse.media.storage_provider import StorageProviderWrapper +from synapse.media.thumbnailer import Thumbnailer, ThumbnailError +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import UserID +from synapse.util.async_helpers import Linearizer +from synapse.util.retryutils import NotRetryingDestination +from synapse.util.stringutils import random_string if TYPE_CHECKING: from synapse.server import HomeServer @@ -1044,69 +1036,3 @@ async def _remove_local_media_from_disk( removed_media.append(media_id) return removed_media, len(removed_media) - - -class MediaRepositoryResource(UnrecognizedRequestResource): - """File uploading and downloading. 
- - Uploads are POSTed to a resource which returns a token which is used to GET - the download:: - - => POST /_matrix/media/r0/upload HTTP/1.1 - Content-Type: - Content-Length: - - - - <= HTTP/1.1 200 OK - Content-Type: application/json - - { "content_uri": "mxc:///" } - - => GET /_matrix/media/r0/download// HTTP/1.1 - - <= HTTP/1.1 200 OK - Content-Type: - Content-Disposition: attachment;filename= - - - - Clients can get thumbnails by supplying a desired width and height and - thumbnailing method:: - - => GET /_matrix/media/r0/thumbnail/ - /?width=&height=&method= HTTP/1.1 - - <= HTTP/1.1 200 OK - Content-Type: image/jpeg or image/png - - - - The thumbnail methods are "crop" and "scale". "scale" tries to return an - image where either the width or the height is smaller than the requested - size. The client should then scale and letterbox the image if it needs to - fit within a given rectangle. "crop" tries to return an image where the - width and height are close to the requested size and the aspect matches - the requested size. The client should scale the image if it needs to fit - within a given rectangle. - """ - - def __init__(self, hs: "HomeServer"): - # If we're not configured to use it, raise if we somehow got here. - if not hs.config.media.can_load_media_repo: - raise ConfigError("Synapse is not configured to use a media repo.") - - super().__init__() - media_repo = hs.get_media_repository() - - self.putChild(b"upload", UploadResource(hs, media_repo)) - self.putChild(b"download", DownloadResource(hs, media_repo)) - self.putChild( - b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage) - ) - if hs.config.media.url_preview_enabled: - self.putChild( - b"preview_url", - PreviewUrlResource(hs, media_repo, media_repo.media_storage), - ) - self.putChild(b"config", MediaConfigResource(hs)) diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py new file mode 100644 index 000000000000..a7e22a91e115 --- /dev/null +++ b/synapse/media/media_storage.py @@ -0,0 +1,374 @@ +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import contextlib +import logging +import os +import shutil +from types import TracebackType +from typing import ( + IO, + TYPE_CHECKING, + Any, + Awaitable, + BinaryIO, + Callable, + Generator, + Optional, + Sequence, + Tuple, + Type, +) + +import attr + +from twisted.internet.defer import Deferred +from twisted.internet.interfaces import IConsumer +from twisted.protocols.basic import FileSender + +import synapse +from synapse.api.errors import NotFoundError +from synapse.logging.context import defer_to_thread, make_deferred_yieldable +from synapse.util import Clock +from synapse.util.file_consumer import BackgroundFileConsumer + +from ._base import FileInfo, Responder +from .filepath import MediaFilePaths + +if TYPE_CHECKING: + from synapse.media.storage_provider import StorageProvider + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class MediaStorage: + """Responsible for storing/fetching files from local sources. + + Args: + hs + local_media_directory: Base path where we store media on disk + filepaths + storage_providers: List of StorageProvider that are used to fetch and store files. + """ + + def __init__( + self, + hs: "HomeServer", + local_media_directory: str, + filepaths: MediaFilePaths, + storage_providers: Sequence["StorageProvider"], + ): + self.hs = hs + self.reactor = hs.get_reactor() + self.local_media_directory = local_media_directory + self.filepaths = filepaths + self.storage_providers = storage_providers + self.spam_checker = hs.get_spam_checker() + self.clock = hs.get_clock() + + async def store_file(self, source: IO, file_info: FileInfo) -> str: + """Write `source` to the on disk media store, and also any other + configured storage providers + + Args: + source: A file like object that should be written + file_info: Info about the file to store + + Returns: + the file path written to in the primary media store + """ + + with self.store_into_file(file_info) as (f, fname, finish_cb): + # Write to the main repository + await self.write_to_file(source, f) + await finish_cb() + + return fname + + async def write_to_file(self, source: IO, output: IO) -> None: + """Asynchronously write the `source` to `output`.""" + await defer_to_thread(self.reactor, _write_file_synchronously, source, output) + + @contextlib.contextmanager + def store_into_file( + self, file_info: FileInfo + ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]: + """Context manager used to get a file like object to write into, as + described by file_info. + + Actually yields a 3-tuple (file, fname, finish_cb), where file is a file + like object that can be written to, fname is the absolute path of file + on disk, and finish_cb is a function that returns an awaitable. + + fname can be used to read the contents from after upload, e.g. to + generate thumbnails. + + finish_cb must be called and waited on after the file has been + successfully been written to. Should not be called if there was an + error. + + Args: + file_info: Info about the file to store + + Example: + + with media_storage.store_into_file(info) as (f, fname, finish_cb): + # .. write into f ... + await finish_cb() + """ + + path = self._file_info_to_path(file_info) + fname = os.path.join(self.local_media_directory, path) + + dirname = os.path.dirname(fname) + os.makedirs(dirname, exist_ok=True) + + finished_called = [False] + + try: + with open(fname, "wb") as f: + + async def finish() -> None: + # Ensure that all writes have been flushed and close the + # file. 
+ f.flush() + f.close() + + spam_check = await self.spam_checker.check_media_file_for_spam( + ReadableFileWrapper(self.clock, fname), file_info + ) + if spam_check != synapse.module_api.NOT_SPAM: + logger.info("Blocking media due to spam checker") + # Note that we'll delete the stored media, due to the + # try/except below. The media also won't be stored in + # the DB. + # We currently ignore any additional field returned by + # the spam-check API. + raise SpamMediaException(errcode=spam_check[0]) + + for provider in self.storage_providers: + await provider.store_file(path, file_info) + + finished_called[0] = True + + yield f, fname, finish + except Exception as e: + try: + os.remove(fname) + except Exception: + pass + + raise e from None + + if not finished_called: + raise Exception("Finished callback not called") + + async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: + """Attempts to fetch media described by file_info from the local cache + and configured storage providers. + + Args: + file_info + + Returns: + Returns a Responder if the file was found, otherwise None. + """ + paths = [self._file_info_to_path(file_info)] + + # fallback for remote thumbnails with no method in the filename + if file_info.thumbnail and file_info.server_name: + paths.append( + self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + ) + ) + + for path in paths: + local_path = os.path.join(self.local_media_directory, path) + if os.path.exists(local_path): + logger.debug("responding with local file %s", local_path) + return FileResponder(open(local_path, "rb")) + logger.debug("local file %s did not exist", local_path) + + for provider in self.storage_providers: + for path in paths: + res: Any = await provider.fetch(path, file_info) + if res: + logger.debug("Streaming %s from %s", path, provider) + return res + logger.debug("%s not found on %s", path, provider) + + return None + + async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str: + """Ensures that the given file is in the local cache. Attempts to + download it from storage providers if it isn't. + + Args: + file_info + + Returns: + Full path to local file + """ + path = self._file_info_to_path(file_info) + local_path = os.path.join(self.local_media_directory, path) + if os.path.exists(local_path): + return local_path + + # Fallback for paths without method names + # Should be removed in the future + if file_info.thumbnail and file_info.server_name: + legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + ) + legacy_local_path = os.path.join(self.local_media_directory, legacy_path) + if os.path.exists(legacy_local_path): + return legacy_local_path + + dirname = os.path.dirname(local_path) + os.makedirs(dirname, exist_ok=True) + + for provider in self.storage_providers: + res: Any = await provider.fetch(path, file_info) + if res: + with res: + consumer = BackgroundFileConsumer( + open(local_path, "wb"), self.reactor + ) + await res.write_to_consumer(consumer) + await consumer.wait() + return local_path + + raise NotFoundError() + + def _file_info_to_path(self, file_info: FileInfo) -> str: + """Converts file_info into a relative path. 
+ + The path is suitable for storing files under a directory, e.g. used to + store files on local FS under the base media repository directory. + """ + if file_info.url_cache: + if file_info.thumbnail: + return self.filepaths.url_cache_thumbnail_rel( + media_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + method=file_info.thumbnail.method, + ) + return self.filepaths.url_cache_filepath_rel(file_info.file_id) + + if file_info.server_name: + if file_info.thumbnail: + return self.filepaths.remote_media_thumbnail_rel( + server_name=file_info.server_name, + file_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + method=file_info.thumbnail.method, + ) + return self.filepaths.remote_media_filepath_rel( + file_info.server_name, file_info.file_id + ) + + if file_info.thumbnail: + return self.filepaths.local_media_thumbnail_rel( + media_id=file_info.file_id, + width=file_info.thumbnail.width, + height=file_info.thumbnail.height, + content_type=file_info.thumbnail.type, + method=file_info.thumbnail.method, + ) + return self.filepaths.local_media_filepath_rel(file_info.file_id) + + +def _write_file_synchronously(source: IO, dest: IO) -> None: + """Write `source` to the file like `dest` synchronously. Should be called + from a thread. + + Args: + source: A file like object that's to be written + dest: A file like object to be written to + """ + source.seek(0) # Ensure we read from the start of the file + shutil.copyfileobj(source, dest) + + +class FileResponder(Responder): + """Wraps an open file that can be sent to a request. + + Args: + open_file: A file like object to be streamed ot the client, + is closed when finished streaming. + """ + + def __init__(self, open_file: IO): + self.open_file = open_file + + def write_to_consumer(self, consumer: IConsumer) -> Deferred: + return make_deferred_yieldable( + FileSender().beginFileTransfer(self.open_file, consumer) + ) + + def __exit__( + self, + exc_type: Optional[Type[BaseException]], + exc_val: Optional[BaseException], + exc_tb: Optional[TracebackType], + ) -> None: + self.open_file.close() + + +class SpamMediaException(NotFoundError): + """The media was blocked by a spam checker, so we simply 404 the request (in + the same way as if it was quarantined). + """ + + +@attr.s(slots=True, auto_attribs=True) +class ReadableFileWrapper: + """Wrapper that allows reading a file in chunks, yielding to the reactor, + and writing to a callback. + + This is simplified `FileSender` that takes an IO object rather than an + `IConsumer`. + """ + + CHUNK_SIZE = 2**14 + + clock: Clock + path: str + + async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None: + """Reads the file in chunks and calls the callback with each chunk.""" + + with open(self.path, "rb") as file: + while True: + chunk = file.read(self.CHUNK_SIZE) + if not chunk: + break + + callback(chunk) + + # We yield to the reactor by sleeping for 0 seconds. 
+ await self.clock.sleep(0) diff --git a/synapse/rest/media/v1/oembed.py b/synapse/media/oembed.py similarity index 99% rename from synapse/rest/media/v1/oembed.py rename to synapse/media/oembed.py index 7592aa5d4767..c0eaf04be544 100644 --- a/synapse/rest/media/v1/oembed.py +++ b/synapse/media/oembed.py @@ -18,7 +18,7 @@ import attr -from synapse.rest.media.v1.preview_html import parse_html_description +from synapse.media.preview_html import parse_html_description from synapse.types import JsonDict from synapse.util import json_decoder diff --git a/synapse/rest/media/v1/preview_html.py b/synapse/media/preview_html.py similarity index 100% rename from synapse/rest/media/v1/preview_html.py rename to synapse/media/preview_html.py diff --git a/synapse/media/storage_provider.py b/synapse/media/storage_provider.py new file mode 100644 index 000000000000..1c9b71d69c77 --- /dev/null +++ b/synapse/media/storage_provider.py @@ -0,0 +1,181 @@ +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import abc +import logging +import os +import shutil +from typing import TYPE_CHECKING, Callable, Optional + +from synapse.config._base import Config +from synapse.logging.context import defer_to_thread, run_in_background +from synapse.util.async_helpers import maybe_awaitable + +from ._base import FileInfo, Responder +from .media_storage import FileResponder + +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class StorageProvider(metaclass=abc.ABCMeta): + """A storage provider is a service that can store uploaded media and + retrieve them. + """ + + @abc.abstractmethod + async def store_file(self, path: str, file_info: FileInfo) -> None: + """Store the file described by file_info. The actual contents can be + retrieved by reading the file in file_info.upload_path. + + Args: + path: Relative path of file in local cache + file_info: The metadata of the file. + """ + + @abc.abstractmethod + async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + """Attempt to fetch the file described by file_info and stream it + into writer. + + Args: + path: Relative path of file in local cache + file_info: The metadata of the file. + + Returns: + Returns a Responder if the provider has the file, otherwise returns None. + """ + + +class StorageProviderWrapper(StorageProvider): + """Wraps a storage provider and provides various config options + + Args: + backend: The storage provider to wrap. + store_local: Whether to store new local files or not. + store_synchronous: Whether to wait for file to be successfully + uploaded, or todo the upload in the background. 
+ store_remote: Whether remote media should be uploaded + """ + + def __init__( + self, + backend: StorageProvider, + store_local: bool, + store_synchronous: bool, + store_remote: bool, + ): + self.backend = backend + self.store_local = store_local + self.store_synchronous = store_synchronous + self.store_remote = store_remote + + def __str__(self) -> str: + return "StorageProviderWrapper[%s]" % (self.backend,) + + async def store_file(self, path: str, file_info: FileInfo) -> None: + if not file_info.server_name and not self.store_local: + return None + + if file_info.server_name and not self.store_remote: + return None + + if file_info.url_cache: + # The URL preview cache is short lived and not worth offloading or + # backing up. + return None + + if self.store_synchronous: + # store_file is supposed to return an Awaitable, but guard + # against improper implementations. + await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore + else: + # TODO: Handle errors. + async def store() -> None: + try: + return await maybe_awaitable( + self.backend.store_file(path, file_info) + ) + except Exception: + logger.exception("Error storing file") + + run_in_background(store) + + async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + if file_info.url_cache: + # Files in the URL preview cache definitely aren't stored here, + # so avoid any potentially slow I/O or network access. + return None + + # store_file is supposed to return an Awaitable, but guard + # against improper implementations. + return await maybe_awaitable(self.backend.fetch(path, file_info)) + + +class FileStorageProviderBackend(StorageProvider): + """A storage provider that stores files in a directory on a filesystem. + + Args: + hs + config: The config returned by `parse_config`. + """ + + def __init__(self, hs: "HomeServer", config: str): + self.hs = hs + self.cache_directory = hs.config.media.media_store_path + self.base_directory = config + + def __str__(self) -> str: + return "FileStorageProviderBackend[%s]" % (self.base_directory,) + + async def store_file(self, path: str, file_info: FileInfo) -> None: + """See StorageProvider.store_file""" + + primary_fname = os.path.join(self.cache_directory, path) + backup_fname = os.path.join(self.base_directory, path) + + dirname = os.path.dirname(backup_fname) + os.makedirs(dirname, exist_ok=True) + + # mypy needs help inferring the type of the second parameter, which is generic + shutil_copyfile: Callable[[str, str], str] = shutil.copyfile + await defer_to_thread( + self.hs.get_reactor(), + shutil_copyfile, + primary_fname, + backup_fname, + ) + + async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: + """See StorageProvider.fetch""" + + backup_fname = os.path.join(self.base_directory, path) + if os.path.isfile(backup_fname): + return FileResponder(open(backup_fname, "rb")) + + return None + + @staticmethod + def parse_config(config: dict) -> str: + """Called on startup to parse config supplied. This should parse + the config and raise if there is a problem. + + The returned value is passed into the constructor. + + In this case we only care about a single param, the directory, so let's + just pull that out. 
+ """ + return Config.ensure_directory(config["directory"]) diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/media/thumbnailer.py similarity index 100% rename from synapse/rest/media/v1/thumbnailer.py rename to synapse/media/thumbnailer.py diff --git a/synapse/rest/media/v1/config_resource.py b/synapse/rest/media/config_resource.py similarity index 100% rename from synapse/rest/media/v1/config_resource.py rename to synapse/rest/media/config_resource.py diff --git a/synapse/rest/media/v1/download_resource.py b/synapse/rest/media/download_resource.py similarity index 95% rename from synapse/rest/media/v1/download_resource.py rename to synapse/rest/media/download_resource.py index 048a042692c3..8f270cf4ccb8 100644 --- a/synapse/rest/media/v1/download_resource.py +++ b/synapse/rest/media/download_resource.py @@ -22,11 +22,10 @@ ) from synapse.http.servlet import parse_boolean from synapse.http.site import SynapseRequest - -from ._base import parse_media_id, respond_404 +from synapse.media._base import parse_media_id, respond_404 if TYPE_CHECKING: - from synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/media_repository_resource.py b/synapse/rest/media/media_repository_resource.py new file mode 100644 index 000000000000..5ebaa3b032cd --- /dev/null +++ b/synapse/rest/media/media_repository_resource.py @@ -0,0 +1,93 @@ +# Copyright 2014-2016 OpenMarket Ltd +# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from synapse.config._base import ConfigError +from synapse.http.server import UnrecognizedRequestResource + +from .config_resource import MediaConfigResource +from .download_resource import DownloadResource +from .preview_url_resource import PreviewUrlResource +from .thumbnail_resource import ThumbnailResource +from .upload_resource import UploadResource + +if TYPE_CHECKING: + from synapse.server import HomeServer + + +class MediaRepositoryResource(UnrecognizedRequestResource): + """File uploading and downloading. + + Uploads are POSTed to a resource which returns a token which is used to GET + the download:: + + => POST /_matrix/media/r0/upload HTTP/1.1 + Content-Type: + Content-Length: + + + + <= HTTP/1.1 200 OK + Content-Type: application/json + + { "content_uri": "mxc:///" } + + => GET /_matrix/media/r0/download// HTTP/1.1 + + <= HTTP/1.1 200 OK + Content-Type: + Content-Disposition: attachment;filename= + + + + Clients can get thumbnails by supplying a desired width and height and + thumbnailing method:: + + => GET /_matrix/media/r0/thumbnail/ + /?width=&height=&method= HTTP/1.1 + + <= HTTP/1.1 200 OK + Content-Type: image/jpeg or image/png + + + + The thumbnail methods are "crop" and "scale". "scale" tries to return an + image where either the width or the height is smaller than the requested + size. 
The client should then scale and letterbox the image if it needs to + fit within a given rectangle. "crop" tries to return an image where the + width and height are close to the requested size and the aspect matches + the requested size. The client should scale the image if it needs to fit + within a given rectangle. + """ + + def __init__(self, hs: "HomeServer"): + # If we're not configured to use it, raise if we somehow got here. + if not hs.config.media.can_load_media_repo: + raise ConfigError("Synapse is not configured to use a media repo.") + + super().__init__() + media_repo = hs.get_media_repository() + + self.putChild(b"upload", UploadResource(hs, media_repo)) + self.putChild(b"download", DownloadResource(hs, media_repo)) + self.putChild( + b"thumbnail", ThumbnailResource(hs, media_repo, media_repo.media_storage) + ) + if hs.config.media.url_preview_enabled: + self.putChild( + b"preview_url", + PreviewUrlResource(hs, media_repo, media_repo.media_storage), + ) + self.putChild(b"config", MediaConfigResource(hs)) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/preview_url_resource.py similarity index 98% rename from synapse/rest/media/v1/preview_url_resource.py rename to synapse/rest/media/preview_url_resource.py index 4a594ab9d83c..7ada72875755 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/preview_url_resource.py @@ -40,21 +40,19 @@ from synapse.http.servlet import parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.media._base import FileInfo, get_filename_from_headers +from synapse.media.media_storage import MediaStorage +from synapse.media.oembed import OEmbedProvider +from synapse.media.preview_html import decode_body, parse_html_to_open_graph from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.rest.media.v1._base import get_filename_from_headers -from synapse.rest.media.v1.media_storage import MediaStorage -from synapse.rest.media.v1.oembed import OEmbedProvider -from synapse.rest.media.v1.preview_html import decode_body, parse_html_to_open_graph from synapse.types import JsonDict, UserID from synapse.util import json_encoder from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.stringutils import random_string -from ._base import FileInfo - if TYPE_CHECKING: - from synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/v1/thumbnail_resource.py b/synapse/rest/media/thumbnail_resource.py similarity index 99% rename from synapse/rest/media/v1/thumbnail_resource.py rename to synapse/rest/media/thumbnail_resource.py index 3e720018b31c..4ee2a0dbda79 100644 --- a/synapse/rest/media/v1/thumbnail_resource.py +++ b/synapse/rest/media/thumbnail_resource.py @@ -27,9 +27,7 @@ ) from synapse.http.servlet import parse_integer, parse_string from synapse.http.site import SynapseRequest -from synapse.rest.media.v1.media_storage import MediaStorage - -from ._base import ( +from synapse.media._base import ( FileInfo, ThumbnailInfo, parse_media_id, @@ -37,9 +35,10 @@ respond_with_file, respond_with_responder, ) +from synapse.media.media_storage import MediaStorage if TYPE_CHECKING: - from 
synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/v1/upload_resource.py b/synapse/rest/media/upload_resource.py similarity index 96% rename from synapse/rest/media/v1/upload_resource.py rename to synapse/rest/media/upload_resource.py index 97548b54e504..697348613b52 100644 --- a/synapse/rest/media/v1/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -20,10 +20,10 @@ from synapse.http.server import DirectServeJsonResource, respond_with_json from synapse.http.servlet import parse_bytes_from_args from synapse.http.site import SynapseRequest -from synapse.rest.media.v1.media_storage import SpamMediaException +from synapse.media.media_storage import SpamMediaException if TYPE_CHECKING: - from synapse.rest.media.v1.media_repository import MediaRepository + from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer logger = logging.getLogger(__name__) diff --git a/synapse/rest/media/v1/_base.py b/synapse/rest/media/v1/_base.py index ef8334ae2586..88427a573708 100644 --- a/synapse/rest/media/v1/_base.py +++ b/synapse/rest/media/v1/_base.py @@ -1,5 +1,4 @@ -# Copyright 2014-2016 OpenMarket Ltd -# Copyright 2019-2021 The Matrix.org Foundation C.I.C. +# Copyright 2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,468 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -import logging -import os -import urllib -from abc import ABC, abstractmethod -from types import TracebackType -from typing import Awaitable, Dict, Generator, List, Optional, Tuple, Type - -import attr - -from twisted.internet.interfaces import IConsumer -from twisted.protocols.basic import FileSender -from twisted.web.server import Request - -from synapse.api.errors import Codes, SynapseError, cs_error -from synapse.http.server import finish_request, respond_with_json -from synapse.http.site import SynapseRequest -from synapse.logging.context import make_deferred_yieldable -from synapse.util.stringutils import is_ascii, parse_and_validate_server_name - -logger = logging.getLogger(__name__) - -# list all text content types that will have the charset default to UTF-8 when -# none is given -TEXT_CONTENT_TYPES = [ - "text/css", - "text/csv", - "text/html", - "text/calendar", - "text/plain", - "text/javascript", - "application/json", - "application/ld+json", - "application/rtf", - "image/svg+xml", - "text/xml", -] - - -def parse_media_id(request: Request) -> Tuple[str, str, Optional[str]]: - """Parses the server name, media ID and optional file name from the request URI - - Also performs some rough validation on the server name. - - Args: - request: The `Request`. - - Returns: - A tuple containing the parsed server name, media ID and optional file name. - - Raises: - SynapseError(404): if parsing or validation fail for any reason - """ - try: - # The type on postpath seems incorrect in Twisted 21.2.0. - postpath: List[bytes] = request.postpath # type: ignore - assert postpath - - # This allows users to append e.g. /test.png to the URL. Useful for - # clients that parse the URL to see content type. 
- server_name_bytes, media_id_bytes = postpath[:2] - server_name = server_name_bytes.decode("utf-8") - media_id = media_id_bytes.decode("utf8") - - # Validate the server name, raising if invalid - parse_and_validate_server_name(server_name) - - file_name = None - if len(postpath) > 2: - try: - file_name = urllib.parse.unquote(postpath[-1].decode("utf-8")) - except UnicodeDecodeError: - pass - return server_name, media_id, file_name - except Exception: - raise SynapseError( - 404, "Invalid media id token %r" % (request.postpath,), Codes.UNKNOWN - ) - - -def respond_404(request: SynapseRequest) -> None: - respond_with_json( - request, - 404, - cs_error("Not found %r" % (request.postpath,), code=Codes.NOT_FOUND), - send_cors=True, - ) - - -async def respond_with_file( - request: SynapseRequest, - media_type: str, - file_path: str, - file_size: Optional[int] = None, - upload_name: Optional[str] = None, -) -> None: - logger.debug("Responding with %r", file_path) - - if os.path.isfile(file_path): - if file_size is None: - stat = os.stat(file_path) - file_size = stat.st_size - - add_file_headers(request, media_type, file_size, upload_name) - - with open(file_path, "rb") as f: - await make_deferred_yieldable(FileSender().beginFileTransfer(f, request)) - - finish_request(request) - else: - respond_404(request) - - -def add_file_headers( - request: Request, - media_type: str, - file_size: Optional[int], - upload_name: Optional[str], -) -> None: - """Adds the correct response headers in preparation for responding with the - media. - - Args: - request - media_type: The media/content type. - file_size: Size in bytes of the media, if known. - upload_name: The name of the requested file, if any. - """ - - def _quote(x: str) -> str: - return urllib.parse.quote(x.encode("utf-8")) - - # Default to a UTF-8 charset for text content types. - # ex, uses UTF-8 for 'text/css' but not 'text/css; charset=UTF-16' - if media_type.lower() in TEXT_CONTENT_TYPES: - content_type = media_type + "; charset=UTF-8" - else: - content_type = media_type - - request.setHeader(b"Content-Type", content_type.encode("UTF-8")) - if upload_name: - # RFC6266 section 4.1 [1] defines both `filename` and `filename*`. - # - # `filename` is defined to be a `value`, which is defined by RFC2616 - # section 3.6 [2] to be a `token` or a `quoted-string`, where a `token` - # is (essentially) a single US-ASCII word, and a `quoted-string` is a - # US-ASCII string surrounded by double-quotes, using backslash as an - # escape character. Note that %-encoding is *not* permitted. - # - # `filename*` is defined to be an `ext-value`, which is defined in - # RFC5987 section 3.2.1 [3] to be `charset "'" [ language ] "'" value-chars`, - # where `value-chars` is essentially a %-encoded string in the given charset. - # - # [1]: https://tools.ietf.org/html/rfc6266#section-4.1 - # [2]: https://tools.ietf.org/html/rfc2616#section-3.6 - # [3]: https://tools.ietf.org/html/rfc5987#section-3.2.1 - - # We avoid the quoted-string version of `filename`, because (a) synapse didn't - # correctly interpret those as of 0.99.2 and (b) they are a bit of a pain and we - # may as well just do the filename* version. - if _can_encode_filename_as_token(upload_name): - disposition = "inline; filename=%s" % (upload_name,) - else: - disposition = "inline; filename*=utf-8''%s" % (_quote(upload_name),) - - request.setHeader(b"Content-Disposition", disposition.encode("ascii")) - - # cache for at least a day. 
- # XXX: we might want to turn this off for data we don't want to - # recommend caching as it's sensitive or private - or at least - # select private. don't bother setting Expires as all our - # clients are smart enough to be happy with Cache-Control - request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400") - if file_size is not None: - request.setHeader(b"Content-Length", b"%d" % (file_size,)) - - # Tell web crawlers to not index, archive, or follow links in media. This - # should help to prevent things in the media repo from showing up in web - # search results. - request.setHeader(b"X-Robots-Tag", "noindex, nofollow, noarchive, noimageindex") - - -# separators as defined in RFC2616. SP and HT are handled separately. -# see _can_encode_filename_as_token. -_FILENAME_SEPARATOR_CHARS = { - "(", - ")", - "<", - ">", - "@", - ",", - ";", - ":", - "\\", - '"', - "/", - "[", - "]", - "?", - "=", - "{", - "}", -} - - -def _can_encode_filename_as_token(x: str) -> bool: - for c in x: - # from RFC2616: - # - # token = 1* - # - # separators = "(" | ")" | "<" | ">" | "@" - # | "," | ";" | ":" | "\" | <"> - # | "/" | "[" | "]" | "?" | "=" - # | "{" | "}" | SP | HT - # - # CHAR = - # - # CTL = - # - if ord(c) >= 127 or ord(c) <= 32 or c in _FILENAME_SEPARATOR_CHARS: - return False - return True - - -async def respond_with_responder( - request: SynapseRequest, - responder: "Optional[Responder]", - media_type: str, - file_size: Optional[int], - upload_name: Optional[str] = None, -) -> None: - """Responds to the request with given responder. If responder is None then - returns 404. - - Args: - request - responder - media_type: The media/content type. - file_size: Size in bytes of the media. If not known it should be None - upload_name: The name of the requested file, if any. - """ - if not responder: - respond_404(request) - return - - # If we have a responder we *must* use it as a context manager. - with responder: - if request._disconnected: - logger.warning( - "Not sending response to request %s, already disconnected.", request - ) - return - - logger.debug("Responding to media request with responder %s", responder) - add_file_headers(request, media_type, file_size, upload_name) - try: - await responder.write_to_consumer(request) - except Exception as e: - # The majority of the time this will be due to the client having gone - # away. Unfortunately, Twisted simply throws a generic exception at us - # in that case. - logger.warning("Failed to write to consumer: %s %s", type(e), e) - - # Unregister the producer, if it has one, so Twisted doesn't complain - if request.producer: - request.unregisterProducer() - - finish_request(request) - - -class Responder(ABC): - """Represents a response that can be streamed to the requester. - - Responder is a context manager which *must* be used, so that any resources - held can be cleaned up. - """ - - @abstractmethod - def write_to_consumer(self, consumer: IConsumer) -> Awaitable: - """Stream response into consumer - - Args: - consumer: The consumer to stream into. 
- - Returns: - Resolves once the response has finished being written - """ - raise NotImplementedError() - - def __enter__(self) -> None: # noqa: B027 - pass - - def __exit__( # noqa: B027 - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - pass - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class ThumbnailInfo: - """Details about a generated thumbnail.""" - - width: int - height: int - method: str - # Content type of thumbnail, e.g. image/png - type: str - # The size of the media file, in bytes. - length: Optional[int] = None - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class FileInfo: - """Details about a requested/uploaded file.""" - - # The server name where the media originated from, or None if local. - server_name: Optional[str] - # The local ID of the file. For local files this is the same as the media_id - file_id: str - # If the file is for the url preview cache - url_cache: bool = False - # Whether the file is a thumbnail or not. - thumbnail: Optional[ThumbnailInfo] = None - - # The below properties exist to maintain compatibility with third-party modules. - @property - def thumbnail_width(self) -> Optional[int]: - if not self.thumbnail: - return None - return self.thumbnail.width - - @property - def thumbnail_height(self) -> Optional[int]: - if not self.thumbnail: - return None - return self.thumbnail.height - - @property - def thumbnail_method(self) -> Optional[str]: - if not self.thumbnail: - return None - return self.thumbnail.method - - @property - def thumbnail_type(self) -> Optional[str]: - if not self.thumbnail: - return None - return self.thumbnail.type - - @property - def thumbnail_length(self) -> Optional[int]: - if not self.thumbnail: - return None - return self.thumbnail.length - - -def get_filename_from_headers(headers: Dict[bytes, List[bytes]]) -> Optional[str]: - """ - Get the filename of the downloaded file by inspecting the - Content-Disposition HTTP header. - - Args: - headers: The HTTP request headers. - - Returns: - The filename, or None. - """ - content_disposition = headers.get(b"Content-Disposition", [b""]) - - # No header, bail out. - if not content_disposition[0]: - return None - - _, params = _parse_header(content_disposition[0]) - - upload_name = None - - # First check if there is a valid UTF-8 filename - upload_name_utf8 = params.get(b"filename*", None) - if upload_name_utf8: - if upload_name_utf8.lower().startswith(b"utf-8''"): - upload_name_utf8 = upload_name_utf8[7:] - # We have a filename*= section. This MUST be ASCII, and any UTF-8 - # bytes are %-quoted. - try: - # Once it is decoded, we can then unquote the %-encoded - # parts strictly into a unicode string. - upload_name = urllib.parse.unquote( - upload_name_utf8.decode("ascii"), errors="strict" - ) - except UnicodeDecodeError: - # Incorrect UTF-8. - pass - - # If there isn't check for an ascii name. - if not upload_name: - upload_name_ascii = params.get(b"filename", None) - if upload_name_ascii and is_ascii(upload_name_ascii): - upload_name = upload_name_ascii.decode("ascii") - - # This may be None here, indicating we did not find a matching name. - return upload_name - - -def _parse_header(line: bytes) -> Tuple[bytes, Dict[bytes, bytes]]: - """Parse a Content-type like header. - - Cargo-culted from `cgi`, but works on bytes rather than strings. 
- - Args: - line: header to be parsed - - Returns: - The main content-type, followed by the parameter dictionary - """ - parts = _parseparam(b";" + line) - key = next(parts) - pdict = {} - for p in parts: - i = p.find(b"=") - if i >= 0: - name = p[:i].strip().lower() - value = p[i + 1 :].strip() - - # strip double-quotes - if len(value) >= 2 and value[0:1] == value[-1:] == b'"': - value = value[1:-1] - value = value.replace(b"\\\\", b"\\").replace(b'\\"', b'"') - pdict[name] = value - - return key, pdict - - -def _parseparam(s: bytes) -> Generator[bytes, None, None]: - """Generator which splits the input on ;, respecting double-quoted sequences - - Cargo-culted from `cgi`, but works on bytes rather than strings. - - Args: - s: header to be parsed - - Returns: - The split input - """ - while s[:1] == b";": - s = s[1:] - - # look for the next ; - end = s.find(b";") - - # if there is an odd number of " marks between here and the next ;, skip to the - # next ; instead - while end > 0 and (s.count(b'"', 0, end) - s.count(b'\\"', 0, end)) % 2: - end = s.find(b";", end + 1) - - if end < 0: - end = len(s) - f = s[:end] - yield f.strip() - s = s[end:] +# This exists purely for backwards compatibility with media providers and spam checkers. +from synapse.media._base import FileInfo, Responder # noqa: F401 diff --git a/synapse/rest/media/v1/media_storage.py b/synapse/rest/media/v1/media_storage.py index db25848744de..11b0e8e23185 100644 --- a/synapse/rest/media/v1/media_storage.py +++ b/synapse/rest/media/v1/media_storage.py @@ -1,4 +1,4 @@ -# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# Copyright 2023 The Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,364 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import contextlib -import logging -import os -import shutil -from types import TracebackType -from typing import ( - IO, - TYPE_CHECKING, - Any, - Awaitable, - BinaryIO, - Callable, - Generator, - Optional, - Sequence, - Tuple, - Type, -) - -import attr - -from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IConsumer -from twisted.protocols.basic import FileSender - -import synapse -from synapse.api.errors import NotFoundError -from synapse.logging.context import defer_to_thread, make_deferred_yieldable -from synapse.util import Clock -from synapse.util.file_consumer import BackgroundFileConsumer - -from ._base import FileInfo, Responder -from .filepath import MediaFilePaths - -if TYPE_CHECKING: - from synapse.rest.media.v1.storage_provider import StorageProvider - from synapse.server import HomeServer - -logger = logging.getLogger(__name__) - - -class MediaStorage: - """Responsible for storing/fetching files from local sources. - - Args: - hs - local_media_directory: Base path where we store media on disk - filepaths - storage_providers: List of StorageProvider that are used to fetch and store files. 
- """ - - def __init__( - self, - hs: "HomeServer", - local_media_directory: str, - filepaths: MediaFilePaths, - storage_providers: Sequence["StorageProvider"], - ): - self.hs = hs - self.reactor = hs.get_reactor() - self.local_media_directory = local_media_directory - self.filepaths = filepaths - self.storage_providers = storage_providers - self.spam_checker = hs.get_spam_checker() - self.clock = hs.get_clock() - - async def store_file(self, source: IO, file_info: FileInfo) -> str: - """Write `source` to the on disk media store, and also any other - configured storage providers - - Args: - source: A file like object that should be written - file_info: Info about the file to store - - Returns: - the file path written to in the primary media store - """ - - with self.store_into_file(file_info) as (f, fname, finish_cb): - # Write to the main repository - await self.write_to_file(source, f) - await finish_cb() - - return fname - - async def write_to_file(self, source: IO, output: IO) -> None: - """Asynchronously write the `source` to `output`.""" - await defer_to_thread(self.reactor, _write_file_synchronously, source, output) - - @contextlib.contextmanager - def store_into_file( - self, file_info: FileInfo - ) -> Generator[Tuple[BinaryIO, str, Callable[[], Awaitable[None]]], None, None]: - """Context manager used to get a file like object to write into, as - described by file_info. - - Actually yields a 3-tuple (file, fname, finish_cb), where file is a file - like object that can be written to, fname is the absolute path of file - on disk, and finish_cb is a function that returns an awaitable. - - fname can be used to read the contents from after upload, e.g. to - generate thumbnails. - - finish_cb must be called and waited on after the file has been - successfully been written to. Should not be called if there was an - error. - - Args: - file_info: Info about the file to store - - Example: - - with media_storage.store_into_file(info) as (f, fname, finish_cb): - # .. write into f ... - await finish_cb() - """ - - path = self._file_info_to_path(file_info) - fname = os.path.join(self.local_media_directory, path) - - dirname = os.path.dirname(fname) - os.makedirs(dirname, exist_ok=True) - - finished_called = [False] - - try: - with open(fname, "wb") as f: - - async def finish() -> None: - # Ensure that all writes have been flushed and close the - # file. - f.flush() - f.close() - - spam_check = await self.spam_checker.check_media_file_for_spam( - ReadableFileWrapper(self.clock, fname), file_info - ) - if spam_check != synapse.module_api.NOT_SPAM: - logger.info("Blocking media due to spam checker") - # Note that we'll delete the stored media, due to the - # try/except below. The media also won't be stored in - # the DB. - # We currently ignore any additional field returned by - # the spam-check API. - raise SpamMediaException(errcode=spam_check[0]) - - for provider in self.storage_providers: - await provider.store_file(path, file_info) - - finished_called[0] = True - - yield f, fname, finish - except Exception as e: - try: - os.remove(fname) - except Exception: - pass - - raise e from None - - if not finished_called: - raise Exception("Finished callback not called") - - async def fetch_media(self, file_info: FileInfo) -> Optional[Responder]: - """Attempts to fetch media described by file_info from the local cache - and configured storage providers. - - Args: - file_info - - Returns: - Returns a Responder if the file was found, otherwise None. 
- """ - paths = [self._file_info_to_path(file_info)] - - # fallback for remote thumbnails with no method in the filename - if file_info.thumbnail and file_info.server_name: - paths.append( - self.filepaths.remote_media_thumbnail_rel_legacy( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - ) - ) - - for path in paths: - local_path = os.path.join(self.local_media_directory, path) - if os.path.exists(local_path): - logger.debug("responding with local file %s", local_path) - return FileResponder(open(local_path, "rb")) - logger.debug("local file %s did not exist", local_path) - - for provider in self.storage_providers: - for path in paths: - res: Any = await provider.fetch(path, file_info) - if res: - logger.debug("Streaming %s from %s", path, provider) - return res - logger.debug("%s not found on %s", path, provider) - - return None - - async def ensure_media_is_in_local_cache(self, file_info: FileInfo) -> str: - """Ensures that the given file is in the local cache. Attempts to - download it from storage providers if it isn't. - - Args: - file_info - - Returns: - Full path to local file - """ - path = self._file_info_to_path(file_info) - local_path = os.path.join(self.local_media_directory, path) - if os.path.exists(local_path): - return local_path - - # Fallback for paths without method names - # Should be removed in the future - if file_info.thumbnail and file_info.server_name: - legacy_path = self.filepaths.remote_media_thumbnail_rel_legacy( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - ) - legacy_local_path = os.path.join(self.local_media_directory, legacy_path) - if os.path.exists(legacy_local_path): - return legacy_local_path - - dirname = os.path.dirname(local_path) - os.makedirs(dirname, exist_ok=True) - - for provider in self.storage_providers: - res: Any = await provider.fetch(path, file_info) - if res: - with res: - consumer = BackgroundFileConsumer( - open(local_path, "wb"), self.reactor - ) - await res.write_to_consumer(consumer) - await consumer.wait() - return local_path - - raise NotFoundError() - - def _file_info_to_path(self, file_info: FileInfo) -> str: - """Converts file_info into a relative path. - - The path is suitable for storing files under a directory, e.g. used to - store files on local FS under the base media repository directory. 
- """ - if file_info.url_cache: - if file_info.thumbnail: - return self.filepaths.url_cache_thumbnail_rel( - media_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - method=file_info.thumbnail.method, - ) - return self.filepaths.url_cache_filepath_rel(file_info.file_id) - - if file_info.server_name: - if file_info.thumbnail: - return self.filepaths.remote_media_thumbnail_rel( - server_name=file_info.server_name, - file_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - method=file_info.thumbnail.method, - ) - return self.filepaths.remote_media_filepath_rel( - file_info.server_name, file_info.file_id - ) - - if file_info.thumbnail: - return self.filepaths.local_media_thumbnail_rel( - media_id=file_info.file_id, - width=file_info.thumbnail.width, - height=file_info.thumbnail.height, - content_type=file_info.thumbnail.type, - method=file_info.thumbnail.method, - ) - return self.filepaths.local_media_filepath_rel(file_info.file_id) - - -def _write_file_synchronously(source: IO, dest: IO) -> None: - """Write `source` to the file like `dest` synchronously. Should be called - from a thread. - - Args: - source: A file like object that's to be written - dest: A file like object to be written to - """ - source.seek(0) # Ensure we read from the start of the file - shutil.copyfileobj(source, dest) - - -class FileResponder(Responder): - """Wraps an open file that can be sent to a request. - - Args: - open_file: A file like object to be streamed ot the client, - is closed when finished streaming. - """ - - def __init__(self, open_file: IO): - self.open_file = open_file - - def write_to_consumer(self, consumer: IConsumer) -> Deferred: - return make_deferred_yieldable( - FileSender().beginFileTransfer(self.open_file, consumer) - ) - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.open_file.close() - - -class SpamMediaException(NotFoundError): - """The media was blocked by a spam checker, so we simply 404 the request (in - the same way as if it was quarantined). - """ - - -@attr.s(slots=True, auto_attribs=True) -class ReadableFileWrapper: - """Wrapper that allows reading a file in chunks, yielding to the reactor, - and writing to a callback. - - This is simplified `FileSender` that takes an IO object rather than an - `IConsumer`. - """ - - CHUNK_SIZE = 2**14 - - clock: Clock - path: str - - async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None: - """Reads the file in chunks and calls the callback with each chunk.""" - - with open(self.path, "rb") as file: - while True: - chunk = file.read(self.CHUNK_SIZE) - if not chunk: - break - - callback(chunk) +# - # We yield to the reactor by sleeping for 0 seconds. - await self.clock.sleep(0) +# This exists purely for backwards compatibility with spam checkers. +from synapse.media.media_storage import ReadableFileWrapper # noqa: F401 diff --git a/synapse/rest/media/v1/storage_provider.py b/synapse/rest/media/v1/storage_provider.py index 1c9b71d69c77..d7653f30aead 100644 --- a/synapse/rest/media/v1/storage_provider.py +++ b/synapse/rest/media/v1/storage_provider.py @@ -1,4 +1,4 @@ -# Copyright 2018-2021 The Matrix.org Foundation C.I.C. +# Copyright 2023 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,171 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# -import abc -import logging -import os -import shutil -from typing import TYPE_CHECKING, Callable, Optional - -from synapse.config._base import Config -from synapse.logging.context import defer_to_thread, run_in_background -from synapse.util.async_helpers import maybe_awaitable - -from ._base import FileInfo, Responder -from .media_storage import FileResponder - -logger = logging.getLogger(__name__) - -if TYPE_CHECKING: - from synapse.server import HomeServer - - -class StorageProvider(metaclass=abc.ABCMeta): - """A storage provider is a service that can store uploaded media and - retrieve them. - """ - - @abc.abstractmethod - async def store_file(self, path: str, file_info: FileInfo) -> None: - """Store the file described by file_info. The actual contents can be - retrieved by reading the file in file_info.upload_path. - - Args: - path: Relative path of file in local cache - file_info: The metadata of the file. - """ - - @abc.abstractmethod - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: - """Attempt to fetch the file described by file_info and stream it - into writer. - - Args: - path: Relative path of file in local cache - file_info: The metadata of the file. - - Returns: - Returns a Responder if the provider has the file, otherwise returns None. - """ - - -class StorageProviderWrapper(StorageProvider): - """Wraps a storage provider and provides various config options - - Args: - backend: The storage provider to wrap. - store_local: Whether to store new local files or not. - store_synchronous: Whether to wait for file to be successfully - uploaded, or todo the upload in the background. - store_remote: Whether remote media should be uploaded - """ - - def __init__( - self, - backend: StorageProvider, - store_local: bool, - store_synchronous: bool, - store_remote: bool, - ): - self.backend = backend - self.store_local = store_local - self.store_synchronous = store_synchronous - self.store_remote = store_remote - - def __str__(self) -> str: - return "StorageProviderWrapper[%s]" % (self.backend,) - - async def store_file(self, path: str, file_info: FileInfo) -> None: - if not file_info.server_name and not self.store_local: - return None - - if file_info.server_name and not self.store_remote: - return None - - if file_info.url_cache: - # The URL preview cache is short lived and not worth offloading or - # backing up. - return None - - if self.store_synchronous: - # store_file is supposed to return an Awaitable, but guard - # against improper implementations. - await maybe_awaitable(self.backend.store_file(path, file_info)) # type: ignore - else: - # TODO: Handle errors. - async def store() -> None: - try: - return await maybe_awaitable( - self.backend.store_file(path, file_info) - ) - except Exception: - logger.exception("Error storing file") - - run_in_background(store) - - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: - if file_info.url_cache: - # Files in the URL preview cache definitely aren't stored here, - # so avoid any potentially slow I/O or network access. - return None - - # store_file is supposed to return an Awaitable, but guard - # against improper implementations. 
- return await maybe_awaitable(self.backend.fetch(path, file_info)) - - -class FileStorageProviderBackend(StorageProvider): - """A storage provider that stores files in a directory on a filesystem. - - Args: - hs - config: The config returned by `parse_config`. - """ - - def __init__(self, hs: "HomeServer", config: str): - self.hs = hs - self.cache_directory = hs.config.media.media_store_path - self.base_directory = config - - def __str__(self) -> str: - return "FileStorageProviderBackend[%s]" % (self.base_directory,) - - async def store_file(self, path: str, file_info: FileInfo) -> None: - """See StorageProvider.store_file""" - - primary_fname = os.path.join(self.cache_directory, path) - backup_fname = os.path.join(self.base_directory, path) - - dirname = os.path.dirname(backup_fname) - os.makedirs(dirname, exist_ok=True) - - # mypy needs help inferring the type of the second parameter, which is generic - shutil_copyfile: Callable[[str, str], str] = shutil.copyfile - await defer_to_thread( - self.hs.get_reactor(), - shutil_copyfile, - primary_fname, - backup_fname, - ) - - async def fetch(self, path: str, file_info: FileInfo) -> Optional[Responder]: - """See StorageProvider.fetch""" - - backup_fname = os.path.join(self.base_directory, path) - if os.path.isfile(backup_fname): - return FileResponder(open(backup_fname, "rb")) - - return None - - @staticmethod - def parse_config(config: dict) -> str: - """Called on startup to parse config supplied. This should parse - the config and raise if there is a problem. - - The returned value is passed into the constructor. - - In this case we only care about a single param, the directory, so let's - just pull that out. - """ - return Config.ensure_directory(config["directory"]) +# This exists purely for backwards compatibility with media providers. +from synapse.media.storage_provider import StorageProvider # noqa: F401 diff --git a/synapse/server.py b/synapse/server.py index e5a347524763..a7c32e9a6092 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -105,6 +105,7 @@ from synapse.handlers.user_directory import UserDirectoryHandler from synapse.http.client import InsecureInterceptableContextFactory, SimpleHttpClient from synapse.http.matrixfederationclient import MatrixFederationHttpClient +from synapse.media.media_repository import MediaRepository from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi from synapse.notifier import Notifier, ReplicationNotifier @@ -115,10 +116,7 @@ from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.replication.tcp.resource import ReplicationStreamer from synapse.replication.tcp.streams import STREAMS_MAP, Stream -from synapse.rest.media.v1.media_repository import ( - MediaRepository, - MediaRepositoryResource, -) +from synapse.rest.media.media_repository_resource import MediaRepositoryResource from synapse.server_notices.server_notices_manager import ServerNoticesManager from synapse.server_notices.server_notices_sender import ServerNoticesSender from synapse.server_notices.worker_server_notices_sender import ( diff --git a/tests/rest/media/v1/__init__.py b/tests/media/__init__.py similarity index 91% rename from tests/rest/media/v1/__init__.py rename to tests/media/__init__.py index b1ee10cfcc44..68910cbf5b96 100644 --- a/tests/rest/media/v1/__init__.py +++ b/tests/media/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2018 New Vector Ltd +# Copyright 2023 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/rest/media/v1/test_base.py b/tests/media/test_base.py similarity index 95% rename from tests/rest/media/v1/test_base.py rename to tests/media/test_base.py index c73179151adb..66498c744d6d 100644 --- a/tests/rest/media/v1/test_base.py +++ b/tests/media/test_base.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.rest.media.v1._base import get_filename_from_headers +from synapse.media._base import get_filename_from_headers from tests import unittest diff --git a/tests/rest/media/v1/test_filepath.py b/tests/media/test_filepath.py similarity index 99% rename from tests/rest/media/v1/test_filepath.py rename to tests/media/test_filepath.py index 43e6f0f70aad..95e3b83d5acb 100644 --- a/tests/rest/media/v1/test_filepath.py +++ b/tests/media/test_filepath.py @@ -15,7 +15,7 @@ import os from typing import Iterable -from synapse.rest.media.v1.filepath import MediaFilePaths, _wrap_with_jail_check +from synapse.media.filepath import MediaFilePaths, _wrap_with_jail_check from tests import unittest diff --git a/tests/rest/media/v1/test_html_preview.py b/tests/media/test_html_preview.py similarity index 99% rename from tests/rest/media/v1/test_html_preview.py rename to tests/media/test_html_preview.py index 1062081a060b..e7da75db3ee0 100644 --- a/tests/rest/media/v1/test_html_preview.py +++ b/tests/media/test_html_preview.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from synapse.rest.media.v1.preview_html import ( +from synapse.media.preview_html import ( _get_html_media_encodings, decode_body, parse_html_to_open_graph, diff --git a/tests/rest/media/v1/test_media_storage.py b/tests/media/test_media_storage.py similarity index 98% rename from tests/rest/media/v1/test_media_storage.py rename to tests/media/test_media_storage.py index 8ed27179c46a..870047d0f250 100644 --- a/tests/rest/media/v1/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -34,13 +34,13 @@ from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable +from synapse.media._base import FileInfo +from synapse.media.filepath import MediaFilePaths +from synapse.media.media_storage import MediaStorage, ReadableFileWrapper +from synapse.media.storage_provider import FileStorageProviderBackend from synapse.module_api import ModuleApi from synapse.rest import admin from synapse.rest.client import login -from synapse.rest.media.v1._base import FileInfo -from synapse.rest.media.v1.filepath import MediaFilePaths -from synapse.rest.media.v1.media_storage import MediaStorage, ReadableFileWrapper -from synapse.rest.media.v1.storage_provider import FileStorageProviderBackend from synapse.server import HomeServer from synapse.types import JsonDict, RoomAlias from synapse.util import Clock @@ -253,7 +253,7 @@ def write_to( config["max_image_pixels"] = 2000000 provider_config = { - "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", + "module": "synapse.media.storage_provider.FileStorageProviderBackend", "store_local": True, "store_synchronous": False, "store_remote": True, diff --git a/tests/rest/media/v1/test_oembed.py b/tests/media/test_oembed.py similarity index 98% rename from 
tests/rest/media/v1/test_oembed.py rename to tests/media/test_oembed.py index 3f7f1dbab9b7..c8bf8421daf1 100644 --- a/tests/rest/media/v1/test_oembed.py +++ b/tests/media/test_oembed.py @@ -18,7 +18,7 @@ from twisted.test.proto_helpers import MemoryReactor -from synapse.rest.media.v1.oembed import OEmbedProvider, OEmbedResult +from synapse.media.oembed import OEmbedProvider, OEmbedResult from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index f41319a5b66f..6d04911d6740 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -20,8 +20,8 @@ import synapse.rest.admin from synapse.api.errors import Codes +from synapse.media.filepath import MediaFilePaths from synapse.rest.client import login, profile, room -from synapse.rest.media.v1.filepath import MediaFilePaths from synapse.server import HomeServer from synapse.util import Clock diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index f5b213219fa8..4b8f889a71ba 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -28,8 +28,8 @@ from synapse.api.constants import ApprovalNoticeMedium, LoginType, UserTypes from synapse.api.errors import Codes, HttpResponseException, ResourceLimitError from synapse.api.room_versions import RoomVersions +from synapse.media.filepath import MediaFilePaths from synapse.rest.client import devices, login, logout, profile, register, room, sync -from synapse.rest.media.v1.filepath import MediaFilePaths from synapse.server import HomeServer from synapse.types import JsonDict, UserID, create_requester from synapse.util import Clock diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/test_url_preview.py similarity index 99% rename from tests/rest/media/v1/test_url_preview.py rename to tests/rest/media/test_url_preview.py index 2acfccec6170..e91dc581c204 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -26,8 +26,8 @@ from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor from synapse.config.oembed import OEmbedEndpointConfig -from synapse.rest.media.v1.media_repository import MediaRepositoryResource -from synapse.rest.media.v1.preview_url_resource import IMAGE_CACHE_EXPIRY_MS +from synapse.rest.media.media_repository_resource import MediaRepositoryResource +from synapse.rest.media.preview_url_resource import IMAGE_CACHE_EXPIRY_MS from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -82,7 +82,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config["media_store_path"] = self.media_store_path provider_config = { - "module": "synapse.rest.media.v1.storage_provider.FileStorageProviderBackend", + "module": "synapse.media.storage_provider.FileStorageProviderBackend", "store_local": True, "store_synchronous": False, "store_remote": True, From b40657314e03583f45ad49504711698a70735313 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 27 Feb 2023 14:19:19 +0000 Subject: [PATCH 247/344] Add module API callbacks for adding and deleting local 3PID associations (#15044 --- changelog.d/15044.feature | 1 + docs/modules/third_party_rules_callbacks.md | 45 ++++++- docs/upgrade.md | 24 ++++ synapse/events/third_party_rules.py | 63 +++++++++ synapse/handlers/auth.py | 49 ++++--- 
synapse/handlers/deactivate_account.py | 20 +-- synapse/module_api/__init__.py | 10 ++ synapse/rest/admin/users.py | 11 +- synapse/rest/client/account.py | 9 +- .../storage/databases/main/registration.py | 13 -- tests/push/test_email.py | 6 +- tests/rest/client/test_third_party_rules.py | 121 ++++++++++++++++++ 12 files changed, 324 insertions(+), 48 deletions(-) create mode 100644 changelog.d/15044.feature diff --git a/changelog.d/15044.feature b/changelog.d/15044.feature new file mode 100644 index 000000000000..91e5cda8c3a9 --- /dev/null +++ b/changelog.d/15044.feature @@ -0,0 +1 @@ +Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). \ No newline at end of file diff --git a/docs/modules/third_party_rules_callbacks.md b/docs/modules/third_party_rules_callbacks.md index 888e43bd1051..4a27d976fb13 100644 --- a/docs/modules/third_party_rules_callbacks.md +++ b/docs/modules/third_party_rules_callbacks.md @@ -254,6 +254,11 @@ If multiple modules implement this callback, Synapse runs them all in order. _First introduced in Synapse v1.56.0_ +** +This callback is deprecated in favour of the `on_add_user_third_party_identifier` callback, which +features the same functionality. The only difference is in name. +** + ```python async def on_threepid_bind(user_id: str, medium: str, address: str) -> None: ``` @@ -268,6 +273,44 @@ server_. If multiple modules implement this callback, Synapse runs them all in order. +### `on_add_user_third_party_identifier` + +_First introduced in Synapse v1.79.0_ + +```python +async def on_add_user_third_party_identifier(user_id: str, medium: str, address: str) -> None: +``` + +Called after successfully creating an association between a user and a third-party identifier +(email address, phone number). The module is given the Matrix ID of the user the +association is for, as well as the medium (`email` or `msisdn`) and address of the +third-party identifier (i.e. an email address). + +Note that this callback is _not_ called if a user attempts to bind their third-party identifier +to an identity server (via a call to [`POST +/_matrix/client/v3/account/3pid/bind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidbind)). + +If multiple modules implement this callback, Synapse runs them all in order. + +### `on_remove_user_third_party_identifier` + +_First introduced in Synapse v1.79.0_ + +```python +async def on_remove_user_third_party_identifier(user_id: str, medium: str, address: str) -> None: +``` + +Called after successfully removing an association between a user and a third-party identifier +(email address, phone number). The module is given the Matrix ID of the user the +association is for, as well as the medium (`email` or `msisdn`) and address of the +third-party identifier (i.e. an email address). + +Note that this callback is _not_ called if a user attempts to unbind their third-party +identifier from an identity server (via a call to [`POST +/_matrix/client/v3/account/3pid/unbind`](https://spec.matrix.org/v1.5/client-server-api/#post_matrixclientv3account3pidunbind)). + +If multiple modules implement this callback, Synapse runs them all in order. 
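+
+For illustration, here is a minimal sketch of a module that registers both of the
+new callbacks. The class and handler names below are hypothetical; the registration
+goes through the module API's `register_third_party_rules_callbacks` method and the
+callback signatures are as documented above.
+
+```python
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class ThreepidAuditor:
+    def __init__(self, config: dict, api):
+        # `api` is the ModuleApi instance that Synapse passes to modules.
+        api.register_third_party_rules_callbacks(
+            on_add_user_third_party_identifier=self.on_add_threepid,
+            on_remove_user_third_party_identifier=self.on_remove_threepid,
+        )
+
+    async def on_add_threepid(self, user_id: str, medium: str, address: str) -> None:
+        # Runs after a third-party identifier is associated with a local user.
+        logger.info("%s added a %s threepid: %s", user_id, medium, address)
+
+    async def on_remove_threepid(self, user_id: str, medium: str, address: str) -> None:
+        # Runs after a third-party identifier is removed from a local user.
+        logger.info("%s removed a %s threepid: %s", user_id, medium, address)
+```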
+ ## Example The example below is a module that implements the third-party rules callback @@ -300,4 +343,4 @@ class EventCensorer: ) event_dict["content"] = new_event_content return event_dict -``` +``` \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index 15167b8c5825..f06e87405424 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,30 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.79.0 + +## The `on_threepid_bind` module callback method has been deprecated + +Synapse v1.79.0 deprecates the +[`on_threepid_bind`](modules/third_party_rules_callbacks.md#on_threepid_bind) +"third-party rules" Synapse module callback method in favour of a new module method, +[`on_add_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_add_user_third_party_identifier). +`on_threepid_bind` will be removed in a future version of Synapse. You should check whether any Synapse +modules in use in your deployment are making use of `on_threepid_bind`, and update them where possible. + +The arguments and functionality of the new method are the same. + +The justification behind the name change is that the old method's name, `on_threepid_bind`, was +misleading. A user is considered to "bind" their third-party ID to their Matrix ID only if they +do so via an [identity server](https://spec.matrix.org/latest/identity-service-api/) +(so that users on other homeservers may find them). But this method was not called in that case - +it was only called when a user added a third-party identifier on the local homeserver. + +Module developers may also be interested in the related +[`on_remove_user_third_party_identifier`](modules/third_party_rules_callbacks.md#on_remove_user_third_party_identifier) +module callback method that was also added in Synapse v1.79.0. This new method is called when a +user removes a third-party identifier from their account. 
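In practice the migration is usually a one-keyword change in a module's registration call, since the callback signature is identical. A sketch follows (not part of this patch; `MyModule` and `on_local_3pid_added` are placeholder names):

```python
from synapse.module_api import ModuleApi


class MyModule:
    def __init__(self, config: dict, api: ModuleApi):
        # Before (deprecated from Synapse v1.79.0):
        #
        #     api.register_third_party_rules_callbacks(
        #         on_threepid_bind=self.on_local_3pid_added,
        #     )
        #
        # After: identical callback signature, new registration keyword.
        api.register_third_party_rules_callbacks(
            on_add_user_third_party_identifier=self.on_local_3pid_added,
        )

    async def on_local_3pid_added(self, user_id: str, medium: str, address: str) -> None:
        ...
```

Older Synapse releases do not accept the new keyword argument, so a module that has to keep supporting releases before v1.79.0 would need to decide at runtime which of the two hooks to register.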
+ # Upgrading to v1.78.0 ## Deprecate the `/_synapse/admin/v1/media//delete` admin API diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 9a25ed419bb7..3e4d52c8d803 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -45,6 +45,8 @@ ON_PROFILE_UPDATE_CALLBACK = Callable[[str, ProfileInfo, bool, bool], Awaitable] ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK = Callable[[str, bool, bool], Awaitable] ON_THREEPID_BIND_CALLBACK = Callable[[str, str, str], Awaitable] +ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable] +ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK = Callable[[str, str, str], Awaitable] def load_legacy_third_party_event_rules(hs: "HomeServer") -> None: @@ -172,6 +174,12 @@ def __init__(self, hs: "HomeServer"): ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = [] self._on_threepid_bind_callbacks: List[ON_THREEPID_BIND_CALLBACK] = [] + self._on_add_user_third_party_identifier_callbacks: List[ + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = [] + self._on_remove_user_third_party_identifier_callbacks: List[ + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = [] def register_third_party_rules_callbacks( self, @@ -191,6 +199,12 @@ def register_third_party_rules_callbacks( ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = None, on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, + on_add_user_third_party_identifier: Optional[ + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, + on_remove_user_third_party_identifier: Optional[ + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, ) -> None: """Register callbacks from modules for each hook.""" if check_event_allowed is not None: @@ -228,6 +242,11 @@ def register_third_party_rules_callbacks( if on_threepid_bind is not None: self._on_threepid_bind_callbacks.append(on_threepid_bind) + if on_add_user_third_party_identifier is not None: + self._on_add_user_third_party_identifier_callbacks.append( + on_add_user_third_party_identifier + ) + async def check_event_allowed( self, event: EventBase, @@ -511,6 +530,9 @@ async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> Non local homeserver, not when it's created on an identity server (and then kept track of so that it can be unbound on the same IS later on). + THIS MODULE CALLBACK METHOD HAS BEEN DEPRECATED. Please use the + `on_add_user_third_party_identifier` callback method instead. + Args: user_id: the user being associated with the threepid. medium: the threepid's medium. @@ -523,3 +545,44 @@ async def on_threepid_bind(self, user_id: str, medium: str, address: str) -> Non logger.exception( "Failed to run module API callback %s: %s", callback, e ) + + async def on_add_user_third_party_identifier( + self, user_id: str, medium: str, address: str + ) -> None: + """Called when an association between a user's Matrix ID and a third-party ID + (email, phone number) has successfully been registered on the homeserver. + + Args: + user_id: The User ID included in the association. + medium: The medium of the third-party ID (email, msisdn). + address: The address of the third-party ID (i.e. an email address). 
+ """ + for callback in self._on_add_user_third_party_identifier_callbacks: + try: + await callback(user_id, medium, address) + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) + + async def on_remove_user_third_party_identifier( + self, user_id: str, medium: str, address: str + ) -> None: + """Called when an association between a user's Matrix ID and a third-party ID + (email, phone number) has been successfully removed on the homeserver. + + This is called *after* any known bindings on identity servers for this + association have been removed. + + Args: + user_id: The User ID included in the removed association. + medium: The medium of the third-party ID (email, msisdn). + address: The address of the third-party ID (i.e. an email address). + """ + for callback in self._on_remove_user_third_party_identifier_callbacks: + try: + await callback(user_id, medium, address) + except Exception as e: + logger.exception( + "Failed to run module API callback %s: %s", callback, e + ) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index b12bc4c9a3a3..308e38edea2d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1542,6 +1542,17 @@ async def delete_access_tokens_for_user( async def add_threepid( self, user_id: str, medium: str, address: str, validated_at: int ) -> None: + """ + Adds an association between a user's Matrix ID and a third-party ID (email, + phone number). + + Args: + user_id: The ID of the user to associate. + medium: The medium of the third-party ID (email, msisdn). + address: The address of the third-party ID (i.e. an email address). + validated_at: The timestamp in ms of when the validation that the user owns + this third-party ID occurred. + """ # check if medium has a valid value if medium not in ["email", "msisdn"]: raise SynapseError( @@ -1566,42 +1577,44 @@ async def add_threepid( user_id, medium, address, validated_at, self.hs.get_clock().time_msec() ) + # Inform Synapse modules that a 3PID association has been created. + await self._third_party_rules.on_add_user_third_party_identifier( + user_id, medium, address + ) + + # Deprecated method for informing Synapse modules that a 3PID association + # has successfully been created. await self._third_party_rules.on_threepid_bind(user_id, medium, address) - async def delete_threepid( - self, user_id: str, medium: str, address: str, id_server: Optional[str] = None - ) -> bool: - """Attempts to unbind the 3pid on the identity servers and deletes it - from the local database. + async def delete_local_threepid( + self, user_id: str, medium: str, address: str + ) -> None: + """Deletes an association between a third-party ID and a user ID from the local + database. This method does not unbind the association from any identity servers. + + If `medium` is 'email' and a pusher is associated with this third-party ID, the + pusher will also be deleted. Args: user_id: ID of user to remove the 3pid from. medium: The medium of the 3pid being removed: "email" or "msisdn". address: The 3pid address to remove. - id_server: Use the given identity server when unbinding - any threepids. If None then will attempt to unbind using the - identity server specified when binding (if known). - - Returns: - Returns True if successfully unbound the 3pid on - the identity server, False if identity server doesn't support the - unbind API. 
""" - # 'Canonicalise' email addresses as per above if medium == "email": address = canonicalise_email(address) - result = await self.hs.get_identity_handler().try_unbind_threepid( - user_id, medium, address, id_server + await self.store.user_delete_threepid(user_id, medium, address) + + # Inform Synapse modules that a 3PID association has been deleted. + await self._third_party_rules.on_remove_user_third_party_identifier( + user_id, medium, address ) - await self.store.user_delete_threepid(user_id, medium, address) if medium == "email": await self.store.delete_pusher_by_app_id_pushkey_user_id( app_id="m.email", pushkey=address, user_id=user_id ) - return result async def hash(self, password: str) -> str: """Computes a secure hash of password. diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index d24f64938219..d31263c717b5 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -100,26 +100,28 @@ async def deactivate_account( # unbinding identity_server_supports_unbinding = True - # Retrieve the 3PIDs this user has bound to an identity server - threepids = await self.store.user_get_bound_threepids(user_id) - - for threepid in threepids: + # Attempt to unbind any known bound threepids to this account from identity + # server(s). + bound_threepids = await self.store.user_get_bound_threepids(user_id) + for threepid in bound_threepids: try: result = await self._identity_handler.try_unbind_threepid( user_id, threepid["medium"], threepid["address"], id_server ) - identity_server_supports_unbinding &= result except Exception: # Do we want this to be a fatal error or should we carry on? logger.exception("Failed to remove threepid from ID server") raise SynapseError(400, "Failed to remove threepid from ID server") - await self.store.user_delete_threepid( + + identity_server_supports_unbinding &= result + + # Remove any local threepid associations for this account. + local_threepids = await self.store.user_get_threepids(user_id) + for threepid in local_threepids: + await self._auth_handler.delete_local_threepid( user_id, threepid["medium"], threepid["address"] ) - # Remove all 3PIDs this user has bound to the homeserver - await self.store.user_delete_threepids(user_id) - # delete any devices belonging to the user, which will also # delete corresponding access tokens. await self._device_handler.delete_all_devices_for_user(user_id) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 1964276a54fb..424239e3df86 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -64,9 +64,11 @@ CHECK_EVENT_ALLOWED_CALLBACK, CHECK_THREEPID_CAN_BE_INVITED_CALLBACK, CHECK_VISIBILITY_CAN_BE_MODIFIED_CALLBACK, + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, ON_CREATE_ROOM_CALLBACK, ON_NEW_EVENT_CALLBACK, ON_PROFILE_UPDATE_CALLBACK, + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK, ON_THREEPID_BIND_CALLBACK, ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, ) @@ -357,6 +359,12 @@ def register_third_party_rules_callbacks( ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK ] = None, on_threepid_bind: Optional[ON_THREEPID_BIND_CALLBACK] = None, + on_add_user_third_party_identifier: Optional[ + ON_ADD_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, + on_remove_user_third_party_identifier: Optional[ + ON_REMOVE_USER_THIRD_PARTY_IDENTIFIER_CALLBACK + ] = None, ) -> None: """Registers callbacks for third party event rules capabilities. 
@@ -373,6 +381,8 @@ def register_third_party_rules_callbacks( on_profile_update=on_profile_update, on_user_deactivation_status_changed=on_user_deactivation_status_changed, on_threepid_bind=on_threepid_bind, + on_add_user_third_party_identifier=on_add_user_third_party_identifier, + on_remove_user_third_party_identifier=on_remove_user_third_party_identifier, ) def register_presence_router_callbacks( diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 7cc4db20d626..357e9a574da5 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -304,13 +304,20 @@ async def on_PUT( # remove old threepids for medium, address in del_threepids: try: - await self.auth_handler.delete_threepid( - user_id, medium, address, None + # Attempt to remove any known bindings of this third-party ID + # and user ID from identity servers. + await self.hs.get_identity_handler().try_unbind_threepid( + user_id, medium, address, id_server=None ) except Exception: logger.exception("Failed to remove threepids") raise SynapseError(500, "Failed to remove threepids") + # Delete the local association of this user ID and third-party ID. + await self.auth_handler.delete_local_threepid( + user_id, medium, address + ) + # add new threepids current_time = self.hs.get_clock().time_msec() for medium, address in add_threepids: diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 662f5bf7621b..484d7440a442 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -768,7 +768,9 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: user_id = requester.user.to_string() try: - ret = await self.auth_handler.delete_threepid( + # Attempt to remove any known bindings of this third-party ID + # and user ID from identity servers. + ret = await self.hs.get_identity_handler().try_unbind_threepid( user_id, body.medium, body.address, body.id_server ) except Exception: @@ -783,6 +785,11 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: else: id_server_unbind_result = "no-support" + # Delete the local association of this user ID and third-party ID. 
+ await self.auth_handler.delete_local_threepid( + user_id, body.medium, body.address + ) + return 200, {"id_server_unbind_result": id_server_unbind_result} diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 9a55e17624f6..717237e02496 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -1002,19 +1002,6 @@ async def user_delete_threepid( desc="user_delete_threepid", ) - async def user_delete_threepids(self, user_id: str) -> None: - """Delete all threepid this user has bound - - Args: - user_id: The user id to delete all threepids of - - """ - await self.db_pool.simple_delete( - "user_threepids", - keyvalues={"user_id": user_id}, - desc="user_delete_threepids", - ) - async def add_user_bound_threepid( self, user_id: str, medium: str, address: str, id_server: str ) -> None: diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 0a3aca5c5076..4ea5472eb475 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -369,10 +369,8 @@ def test_no_email_sent_after_removed(self) -> None: # disassociate the user's email address self.get_success( - self.auth_handler.delete_threepid( - user_id=self.user_id, - medium="email", - address="a@example.com", + self.auth_handler.delete_local_threepid( + user_id=self.user_id, medium="email", address="a@example.com" ) ) diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index c0f93f898a79..3b9951370752 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -934,3 +934,124 @@ def test_on_threepid_bind(self) -> None: # Check that the mock was called with the right parameters self.assertEqual(args, (user_id, "email", "foo@example.com")) + + def test_on_add_and_remove_user_third_party_identifier(self) -> None: + """Tests that the on_add_user_third_party_identifier and + on_remove_user_third_party_identifier module callbacks are called + just before associating and removing a 3PID to/from an account. + """ + # Pretend to be a Synapse module and register both callbacks as mocks. + third_party_rules = self.hs.get_third_party_event_rules() + on_add_user_third_party_identifier_callback_mock = Mock( + return_value=make_awaitable(None) + ) + on_remove_user_third_party_identifier_callback_mock = Mock( + return_value=make_awaitable(None) + ) + third_party_rules._on_threepid_bind_callbacks.append( + on_add_user_third_party_identifier_callback_mock + ) + third_party_rules._on_threepid_bind_callbacks.append( + on_remove_user_third_party_identifier_callback_mock + ) + + # Register an admin user. + self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Also register a normal user we can modify. + user_id = self.register_user("user", "password") + + # Add a 3PID to the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "threepids": [ + { + "medium": "email", + "address": "foo@example.com", + }, + ], + }, + access_token=admin_tok, + ) + + # Check that the mocked add callback was called with the appropriate + # 3PID details. 
+ self.assertEqual(channel.code, 200, channel.json_body) + on_add_user_third_party_identifier_callback_mock.assert_called_once() + args = on_add_user_third_party_identifier_callback_mock.call_args[0] + self.assertEqual(args, (user_id, "email", "foo@example.com")) + + # Now remove the 3PID from the user + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "threepids": [], + }, + access_token=admin_tok, + ) + + # Check that the mocked remove callback was called with the appropriate + # 3PID details. + self.assertEqual(channel.code, 200, channel.json_body) + on_remove_user_third_party_identifier_callback_mock.assert_called_once() + args = on_remove_user_third_party_identifier_callback_mock.call_args[0] + self.assertEqual(args, (user_id, "email", "foo@example.com")) + + def test_on_remove_user_third_party_identifier_is_called_on_deactivate( + self, + ) -> None: + """Tests that the on_remove_user_third_party_identifier module callback is called + when a user is deactivated and their third-party ID associations are deleted. + """ + # Pretend to be a Synapse module and register both callbacks as mocks. + third_party_rules = self.hs.get_third_party_event_rules() + on_remove_user_third_party_identifier_callback_mock = Mock( + return_value=make_awaitable(None) + ) + third_party_rules._on_threepid_bind_callbacks.append( + on_remove_user_third_party_identifier_callback_mock + ) + + # Register an admin user. + self.register_user("admin", "password", admin=True) + admin_tok = self.login("admin", "password") + + # Also register a normal user we can modify. + user_id = self.register_user("user", "password") + + # Add a 3PID to the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "threepids": [ + { + "medium": "email", + "address": "foo@example.com", + }, + ], + }, + access_token=admin_tok, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # Now deactivate the user. + channel = self.make_request( + "PUT", + "/_synapse/admin/v2/users/%s" % user_id, + { + "deactivated": True, + }, + access_token=admin_tok, + ) + + # Check that the mocked remove callback was called with the appropriate + # 3PID details. + self.assertEqual(channel.code, 200, channel.json_body) + on_remove_user_third_party_identifier_callback_mock.assert_called_once() + args = on_remove_user_third_party_identifier_callback_mock.call_args[0] + self.assertEqual(args, (user_id, "email", "foo@example.com")) From 189a878a355f0c3933b24c7d7a4d22b11400f7f0 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 27 Feb 2023 13:08:18 -0700 Subject: [PATCH 248/344] Remove dangling reference to being a reference implementation (#15167) * Remove dangling reference to being a reference implementation * Create 15167.misc --- changelog.d/15167.misc | 1 + synapse/__init__.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15167.misc diff --git a/changelog.d/15167.misc b/changelog.d/15167.misc new file mode 100644 index 000000000000..175c2a3b8385 --- /dev/null +++ b/changelog.d/15167.misc @@ -0,0 +1 @@ +Remove dangling reference to being a reference implementation in docstring. diff --git a/synapse/__init__.py b/synapse/__init__.py index fbfd506a43d0..a203ed533ae5 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -1,5 +1,6 @@ # Copyright 2014-2016 OpenMarket Ltd -# Copyright 2018-9 New Vector Ltd +# Copyright 2018-2019 New Vector Ltd +# Copyright 2023 The Matrix.org Foundation C.I.C. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" This is a reference implementation of a Matrix homeserver. +""" This is an implementation of a Matrix homeserver. """ import json From 1cd4fbc51d5c8ffc170dd3665ef81c03cc4a65e8 Mon Sep 17 00:00:00 2001 From: Evan Krall Date: Tue, 28 Feb 2023 03:09:31 -0800 Subject: [PATCH 249/344] Correct documentation about registration_shared_secret_path (#15168) * Correct documentation about registration_shared_secret_path * Create 15168.doc * Update changelog.d/15168.doc --------- Co-authored-by: David Robertson --- changelog.d/15168.doc | 1 + docs/usage/configuration/config_documentation.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15168.doc diff --git a/changelog.d/15168.doc b/changelog.d/15168.doc new file mode 100644 index 000000000000..dbd3c54714e4 --- /dev/null +++ b/changelog.d/15168.doc @@ -0,0 +1 @@ +Correct the description of the behavior of `registration_shared_secret_path` on startup. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 4139961810f4..015855ee7ef4 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -2227,8 +2227,8 @@ allows the shared secret to be specified in an external file. The file should be a plain text file, containing only the shared secret. -If this file does not exist, Synapse will create a new signing -key on startup and store it in this file. +If this file does not exist, Synapse will create a new shared +secret on startup and store it in this file. Example configuration: ```yaml From 93f7955eba50c827f96e1b2e8e44ef22a98cecc4 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Tue, 28 Feb 2023 13:09:10 +0100 Subject: [PATCH 250/344] Admin API endpoint to delete a reported event (#15116) * Admin api to delete event report * lint + tests * newsfile * Apply suggestions from code review Co-authored-by: David Robertson * revert changes - move to WorkerStore * update unit test * Note that timestamp is in millseconds --------- Co-authored-by: David Robertson --- changelog.d/15116.feature | 1 + docs/admin_api/event_reports.md | 14 +++ synapse/rest/admin/event_reports.py | 41 +++++-- synapse/storage/databases/main/room.py | 36 ++++++- tests/rest/admin/test_event_reports.py | 143 ++++++++++++++++++++++++- 5 files changed, 224 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15116.feature diff --git a/changelog.d/15116.feature b/changelog.d/15116.feature new file mode 100644 index 000000000000..087d8dc7f18e --- /dev/null +++ b/changelog.d/15116.feature @@ -0,0 +1 @@ +Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). \ No newline at end of file diff --git a/docs/admin_api/event_reports.md b/docs/admin_api/event_reports.md index beec8bb7efe9..83f7dc37f41a 100644 --- a/docs/admin_api/event_reports.md +++ b/docs/admin_api/event_reports.md @@ -169,3 +169,17 @@ The following fields are returned in the JSON response body: * `canonical_alias`: string - The canonical alias of the room. `null` if the room does not have a canonical alias set. 
* `event_json`: object - Details of the original event that was reported. + +# Delete a specific event report + +This API deletes a specific event report. If the request is successful, the response body +will be an empty JSON object. + +The api is: +``` +DELETE /_synapse/admin/v1/event_reports/ +``` + +**URL parameters:** + +* `report_id`: string - The ID of the event report. diff --git a/synapse/rest/admin/event_reports.py b/synapse/rest/admin/event_reports.py index a3beb74e2c3d..c546ef7e2305 100644 --- a/synapse/rest/admin/event_reports.py +++ b/synapse/rest/admin/event_reports.py @@ -53,11 +53,11 @@ class EventReportsRestServlet(RestServlet): PATTERNS = admin_patterns("/event_reports$") def __init__(self, hs: "HomeServer"): - self.auth = hs.get_auth() - self.store = hs.get_datastores().main + self._auth = hs.get_auth() + self._store = hs.get_datastores().main async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self.auth, request) + await assert_requester_is_admin(self._auth, request) start = parse_integer(request, "from", default=0) limit = parse_integer(request, "limit", default=100) @@ -79,7 +79,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: errcode=Codes.INVALID_PARAM, ) - event_reports, total = await self.store.get_event_reports_paginate( + event_reports, total = await self._store.get_event_reports_paginate( start, limit, direction, user_id, room_id ) ret = {"event_reports": event_reports, "total": total} @@ -108,13 +108,13 @@ class EventReportDetailRestServlet(RestServlet): PATTERNS = admin_patterns("/event_reports/(?P[^/]*)$") def __init__(self, hs: "HomeServer"): - self.auth = hs.get_auth() - self.store = hs.get_datastores().main + self._auth = hs.get_auth() + self._store = hs.get_datastores().main async def on_GET( self, request: SynapseRequest, report_id: str ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self.auth, request) + await assert_requester_is_admin(self._auth, request) message = ( "The report_id parameter must be a string representing a positive integer." @@ -131,8 +131,33 @@ async def on_GET( HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM ) - ret = await self.store.get_event_report(resolved_report_id) + ret = await self._store.get_event_report(resolved_report_id) if not ret: raise NotFoundError("Event report not found") return HTTPStatus.OK, ret + + async def on_DELETE( + self, request: SynapseRequest, report_id: str + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + message = ( + "The report_id parameter must be a string representing a positive integer." + ) + try: + resolved_report_id = int(report_id) + except ValueError: + raise SynapseError( + HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM + ) + + if resolved_report_id < 0: + raise SynapseError( + HTTPStatus.BAD_REQUEST, message, errcode=Codes.INVALID_PARAM + ) + + if await self._store.delete_event_report(resolved_report_id): + return HTTPStatus.OK, {} + + raise NotFoundError("Event report not found") diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 39f89291b2ba..a2e9519cb6c4 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1417,6 +1417,27 @@ def get_un_partial_stated_rooms_from_stream_txn( get_un_partial_stated_rooms_from_stream_txn, ) + async def delete_event_report(self, report_id: int) -> bool: + """Remove an event report from database. 
+ + Args: + report_id: Report to delete + + Returns: + Whether the report was successfully deleted or not. + """ + try: + await self.db_pool.simple_delete_one( + table="event_reports", + keyvalues={"id": report_id}, + desc="delete_event_report", + ) + except StoreError: + # Deletion failed because report does not exist + return False + + return True + class _BackgroundUpdates: REMOVE_TOMESTONED_ROOMS_BG_UPDATE = "remove_tombstoned_rooms_from_directory" @@ -2139,7 +2160,19 @@ async def add_event_report( reason: Optional[str], content: JsonDict, received_ts: int, - ) -> None: + ) -> int: + """Add an event report + + Args: + room_id: Room that contains the reported event. + event_id: The reported event. + user_id: User who reports the event. + reason: Description that the user specifies. + content: Report request body (score and reason). + received_ts: Time when the user submitted the report (milliseconds). + Returns: + Id of the event report. + """ next_id = self._event_reports_id_gen.get_next() await self.db_pool.simple_insert( table="event_reports", @@ -2154,6 +2187,7 @@ async def add_event_report( }, desc="add_event_report", ) + return next_id async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: """Retrieve an event report diff --git a/tests/rest/admin/test_event_reports.py b/tests/rest/admin/test_event_reports.py index 233eba351690..f189b0776910 100644 --- a/tests/rest/admin/test_event_reports.py +++ b/tests/rest/admin/test_event_reports.py @@ -78,7 +78,7 @@ def test_no_auth(self) -> None: """ Try to get an event report without authentication. """ - channel = self.make_request("GET", self.url, b"{}") + channel = self.make_request("GET", self.url, {}) self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -473,7 +473,7 @@ def test_no_auth(self) -> None: """ Try to get event report without authentication. """ - channel = self.make_request("GET", self.url, b"{}") + channel = self.make_request("GET", self.url, {}) self.assertEqual(401, channel.code, msg=channel.json_body) self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) @@ -599,3 +599,142 @@ def _check_fields(self, content: JsonDict) -> None: self.assertIn("room_id", content["event_json"]) self.assertIn("sender", content["event_json"]) self.assertIn("content", content["event_json"]) + + +class DeleteEventReportTestCase(unittest.HomeserverTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self._store = hs.get_datastores().main + + self.admin_user = self.register_user("admin", "pass", admin=True) + self.admin_user_tok = self.login("admin", "pass") + + self.other_user = self.register_user("user", "pass") + self.other_user_tok = self.login("user", "pass") + + # create report + event_id = self.get_success( + self._store.add_event_report( + "room_id", + "event_id", + self.other_user, + "this makes me sad", + {}, + self.clock.time_msec(), + ) + ) + + self.url = f"/_synapse/admin/v1/event_reports/{event_id}" + + def test_no_auth(self) -> None: + """ + Try to delete event report without authentication. 
+ """ + channel = self.make_request("DELETE", self.url) + + self.assertEqual(401, channel.code, msg=channel.json_body) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_requester_is_no_admin(self) -> None: + """ + If the user is not a server admin, an error 403 is returned. + """ + + channel = self.make_request( + "DELETE", + self.url, + access_token=self.other_user_tok, + ) + + self.assertEqual(403, channel.code, msg=channel.json_body) + self.assertEqual(Codes.FORBIDDEN, channel.json_body["errcode"]) + + def test_delete_success(self) -> None: + """ + Testing delete a report. + """ + + channel = self.make_request( + "DELETE", + self.url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual({}, channel.json_body) + + channel = self.make_request( + "GET", + self.url, + access_token=self.admin_user_tok, + ) + + # check that report was deleted + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_invalid_report_id(self) -> None: + """ + Testing that an invalid `report_id` returns a 400. + """ + + # `report_id` is negative + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/-123", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + # `report_id` is a non-numerical string + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/abcdef", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + # `report_id` is undefined + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/", + access_token=self.admin_user_tok, + ) + + self.assertEqual(400, channel.code, msg=channel.json_body) + self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) + self.assertEqual( + "The report_id parameter must be a string representing a positive integer.", + channel.json_body["error"], + ) + + def test_report_id_not_found(self) -> None: + """ + Testing that a not existing `report_id` returns a 404. + """ + + channel = self.make_request( + "DELETE", + "/_synapse/admin/v1/event_reports/123", + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + self.assertEqual("Event report not found", channel.json_body["error"]) From 521026897c3278344f76d9a7f0555acb49a724fb Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 28 Feb 2023 14:16:33 +0000 Subject: [PATCH 251/344] Add documentation for caching in a module (#14026) * Add documentation for caching in a module * Changelog * Formatting * Wrap lines at a length that mdbook is happier with * Typo fix Co-authored-by: Erik Johnston * Link to recent version of the API In the longer term I'd like to see us generate markdown with Sphinx. 
* Refer to public `cached` decorator * Mark caching as being added in 1.74 Some of the underlying infrastructure was added in 1.69, but the public-facing `cached` decorator was only added in 1.74. It is the latter that I think we should be advertising. * Update docs/modules/writing_a_module.md Co-authored-by: Patrick Cloke --------- Co-authored-by: David Robertson Co-authored-by: Erik Johnston Co-authored-by: Patrick Cloke --- changelog.d/14026.doc | 1 + docs/modules/writing_a_module.md | 56 ++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100644 changelog.d/14026.doc diff --git a/changelog.d/14026.doc b/changelog.d/14026.doc new file mode 100644 index 000000000000..28fc5568ea0b --- /dev/null +++ b/changelog.d/14026.doc @@ -0,0 +1 @@ +Document how to use caches in a module. diff --git a/docs/modules/writing_a_module.md b/docs/modules/writing_a_module.md index 30de69a53387..b99f64b9d8e8 100644 --- a/docs/modules/writing_a_module.md +++ b/docs/modules/writing_a_module.md @@ -83,3 +83,59 @@ the callback name as the argument name and the function as its value. A Callbacks for each category can be found on their respective page of the [Synapse documentation website](https://matrix-org.github.io/synapse). + +## Caching + +_Added in Synapse 1.74.0._ + +Modules can leverage Synapse's caching tools to manage their own cached functions. This +can be helpful for modules that need to repeatedly request the same data from the database +or a remote service. + +Functions that need to be wrapped with a cache need to be decorated with a `@cached()` +decorator (which can be imported from `synapse.module_api`) and registered with the +[`ModuleApi.register_cached_function`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L888) +API when initialising the module. If the module needs to invalidate an entry in a cache, +it needs to use the [`ModuleApi.invalidate_cache`](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L904) +API, with the function to invalidate the cache of and the key(s) of the entry to +invalidate. + +Below is an example of a simple module using a cached function: + +```python +from typing import Any +from synapse.module_api import cached, ModuleApi + +class MyModule: + def __init__(self, config: Any, api: ModuleApi): + self.api = api + + # Register the cached function so Synapse knows how to correctly invalidate + # entries for it. + self.api.register_cached_function(self.get_user_from_id) + + @cached() + async def get_department_for_user(self, user_id: str) -> str: + """A function with a cache.""" + # Request a department from an external service. + return await self.http_client.get_json( + "https://int.example.com/users", {"user_id": user_id) + )["department"] + + async def do_something_with_users(self) -> None: + """Calls the cached function and then invalidates an entry in its cache.""" + + user_id = "@alice:example.com" + + # Get the user. Since get_department_for_user is wrapped with a cache, + # the return value for this user_id will be cached. + department = await self.get_department_for_user(user_id) + + # Do something with `department`... + + # Let's say something has changed with our user, and the entry we have for + # them in the cache is out of date, so we want to invalidate it. 
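The accompanying `rust/src/push/mod.rs` changes fall outside this excerpt, but the idea described in the commit message (a serialise-only twin variant that shares the `event_match` kind yet cannot be produced by deserialising user input) can be sketched roughly as follows. These are simplified, stand-alone types for illustration, not the actual Synapse definitions:

```rust
use std::borrow::Cow;

use serde::{Deserialize, Serialize};

/// User-suppliable `event_match` condition: a literal `pattern` is required.
#[derive(Serialize, Deserialize, Debug)]
pub struct EventMatchCondition {
    pub key: Cow<'static, str>,
    pub pattern: Cow<'static, str>,
}

/// Base-rule-only variant carrying `pattern_type` ("user_id" / "user_localpart")
/// instead of a literal pattern. Deliberately has no `Deserialize` derive.
#[derive(Serialize, Debug)]
pub struct EventMatchTypeCondition {
    pub key: Cow<'static, str>,
    pub pattern_type: Cow<'static, str>,
}

/// Conditions are tagged by `kind`. Both variants serialise as "event_match",
/// but `skip_deserializing` means client-supplied JSON can only ever yield the
/// plain `EventMatch` form.
#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "kind")]
pub enum KnownCondition {
    #[serde(rename = "event_match")]
    EventMatch(EventMatchCondition),
    #[serde(skip_deserializing, rename = "event_match")]
    EventMatchType(EventMatchTypeCondition),
}

fn main() -> Result<(), serde_json::Error> {
    // A client submitting the unspecced `pattern_type` field no longer
    // round-trips into the internal type: there is no `pattern`, so the
    // deserialisable variant does not match.
    let from_client =
        r#"{"kind": "event_match", "key": "content.body", "pattern_type": "user_id"}"#;
    assert!(serde_json::from_str::<KnownCondition>(from_client).is_err());

    // A base rule built in code still serialises to an ordinary event_match kind.
    let base_rule_condition = KnownCondition::EventMatchType(EventMatchTypeCondition {
        key: Cow::Borrowed("content.body"),
        pattern_type: Cow::Borrowed("user_localpart"),
    });
    println!("{}", serde_json::to_string(&base_rule_condition)?);
    Ok(())
}
```

Synapse's own `Condition` type additionally has a catch-all for unrecognised conditions (not shown in this excerpt), so in practice such client input is treated as an unknown condition rather than rejected outright; and when base rules are rendered back to clients, `pattern_type` is still converted into a concrete `pattern`, as the commit message notes. Only the user-input path is tightened.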
+ await self.api.invalidate_cache(self.get_department_for_user, (user_id,)) +``` + +See the [`cached` docstring](https://github.com/matrix-org/synapse/blob/release-v1.77/synapse/module_api/__init__.py#L190) for more details. From e746f80b4fd57fb0296c06c11c8d1240fe118c45 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 28 Feb 2023 10:11:20 -0500 Subject: [PATCH 252/344] Do not accept pattern_type from user input in push rules. (#15088) Internally the push rules module uses a `pattern_type` property for `event_match` conditions (and `related_event_match`) to mark the condition as matching the current user's Matrix ID or localpart. This is leaky to the Client-Server API where a user can successfully set a condition which provides `pattern_type` instead of `pattern` (note that there's no benefit to doing this -- the user can just use their own Matrix ID or localpart instead). When serializing back to the client the `pattern_type` property is converted into a proper `pattern`. The following changes are made to avoid this: * Separate the `KnownCondition::EventMatch` enum value into `EventMatch` and `EventMatchType`, each with their own expected properties. (Note that a similar change is made for `RelatedEventMatch`.) * Make it such that the `pattern_type` variants serialize to the same condition kind, but cannot be deserialized (since they're only provided by base rules). * As a final tweak, convert `user_id` vs. `user_localpart` values into an enum. --- changelog.d/15088.bugfix | 1 + rust/benches/evaluator.rs | 9 +- rust/src/push/base_rules.rs | 135 ++++++++------------- rust/src/push/evaluator.rs | 155 +++++++++++-------------- rust/src/push/mod.rs | 103 ++++++++++++++-- tests/push/test_push_rule_evaluator.py | 27 +++++ 6 files changed, 244 insertions(+), 186 deletions(-) create mode 100644 changelog.d/15088.bugfix diff --git a/changelog.d/15088.bugfix b/changelog.d/15088.bugfix new file mode 100644 index 000000000000..15d5286f80f7 --- /dev/null +++ b/changelog.d/15088.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where Synapse handled an unspecced field on push rules. 
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index efd19a216541..9a871f569334 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -60,8 +60,7 @@ fn bench_match_exact(b: &mut Bencher) { let condition = Condition::Known(synapse::push::KnownCondition::EventMatch( EventMatchCondition { key: "room_id".into(), - pattern: Some("!room:server".into()), - pattern_type: None, + pattern: "!room:server".into(), }, )); @@ -109,8 +108,7 @@ fn bench_match_word(b: &mut Bencher) { let condition = Condition::Known(synapse::push::KnownCondition::EventMatch( EventMatchCondition { key: "content.body".into(), - pattern: Some("test".into()), - pattern_type: None, + pattern: "test".into(), }, )); @@ -158,8 +156,7 @@ fn bench_match_word_miss(b: &mut Bencher) { let condition = Condition::Known(synapse::push::KnownCondition::EventMatch( EventMatchCondition { key: "content.body".into(), - pattern: Some("foobar".into()), - pattern_type: None, + pattern: "foobar".into(), }, )); diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 4a62b9696f9d..62de51d91528 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -21,13 +21,13 @@ use lazy_static::lazy_static; use serde_json::Value; use super::KnownCondition; -use crate::push::Condition; -use crate::push::EventMatchCondition; use crate::push::PushRule; -use crate::push::RelatedEventMatchCondition; +use crate::push::RelatedEventMatchTypeCondition; use crate::push::SetTweak; use crate::push::TweakValue; use crate::push::{Action, ExactEventMatchCondition, SimpleJsonValue}; +use crate::push::{Condition, EventMatchTypeCondition}; +use crate::push::{EventMatchCondition, EventMatchPatternType}; const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak { set_tweak: Cow::Borrowed("highlight"), @@ -72,8 +72,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("content.m.relates_to.rel_type"), - pattern: Some(Cow::Borrowed("m.replace")), - pattern_type: None, + pattern: Cow::Borrowed("m.replace"), }, ))]), actions: Cow::Borrowed(&[]), @@ -86,8 +85,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("content.msgtype"), - pattern: Some(Cow::Borrowed("m.notice")), - pattern_type: None, + pattern: Cow::Borrowed("m.notice"), }, ))]), actions: Cow::Borrowed(&[Action::DontNotify]), @@ -100,18 +98,15 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.member")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.member"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("content.membership"), - pattern: Some(Cow::Borrowed("invite")), - pattern_type: None, + pattern: Cow::Borrowed("invite"), })), - Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition { key: Cow::Borrowed("state_key"), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_id")), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]), @@ -124,8 +119,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ 
conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.member")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.member"), }, ))]), actions: Cow::Borrowed(&[Action::DontNotify]), @@ -135,11 +129,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ PushRule { rule_id: Cow::Borrowed("global/override/.im.nheko.msc3664.reply"), priority_class: 5, - conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatch( - RelatedEventMatchCondition { - key: Some(Cow::Borrowed("sender")), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_id")), + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelatedEventMatchType( + RelatedEventMatchTypeCondition { + key: Cow::Borrowed("sender"), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserId), rel_type: Cow::Borrowed("m.in_reply_to"), include_fallbacks: None, }, @@ -189,8 +182,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ }), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("content.body"), - pattern: Some(Cow::Borrowed("@room")), - pattern_type: None, + pattern: Cow::Borrowed("@room"), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]), @@ -203,13 +195,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.tombstone")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.tombstone"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("state_key"), - pattern: Some(Cow::Borrowed("")), - pattern_type: None, + pattern: Cow::Borrowed(""), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]), @@ -222,8 +212,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.reaction")), - pattern_type: None, + pattern: Cow::Borrowed("m.reaction"), }, ))]), actions: Cow::Borrowed(&[]), @@ -236,13 +225,11 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.server_acl")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.server_acl"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("state_key"), - pattern: Some(Cow::Borrowed("")), - pattern_type: None, + pattern: Cow::Borrowed(""), })), ]), actions: Cow::Borrowed(&[]), @@ -255,8 +242,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.response")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.response"), }, ))]), actions: Cow::Borrowed(&[]), @@ -268,11 +254,10 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule { rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"), priority_class: 4, - conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( - EventMatchCondition { + conditions: 
Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatchType( + EventMatchTypeCondition { key: Cow::Borrowed("content.body"), - pattern: None, - pattern_type: Some(Cow::Borrowed("user_localpart")), + pattern_type: Cow::Borrowed(&EventMatchPatternType::UserLocalpart), }, ))]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), @@ -287,8 +272,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.call.invite")), - pattern_type: None, + pattern: Cow::Borrowed("m.call.invite"), }, ))]), actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]), @@ -301,8 +285,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.message")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.message"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -318,8 +301,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.encrypted"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -338,8 +320,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.encrypted"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -363,8 +344,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.message"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -388,8 +368,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.file"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -413,8 +392,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. 
- pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.image"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -438,8 +416,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.video"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -463,8 +440,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc1767.audio"), })), Condition::Known(KnownCondition::RoomMemberCount { is: Some(Cow::Borrowed("2")), @@ -485,8 +461,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.message")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.message"), }, ))]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), @@ -499,8 +474,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("m.room.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("m.room.encrypted"), }, ))]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), @@ -514,8 +488,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.encrypted")), - pattern_type: None, + pattern: Cow::Borrowed("m.encrypted"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -534,8 +507,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.message")), - pattern_type: None, + pattern: Cow::Borrowed("m.message"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -554,8 +526,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.file")), - pattern_type: None, + pattern: Cow::Borrowed("m.file"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -574,8 +545,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. 
- pattern: Some(Cow::Borrowed("m.image")), - pattern_type: None, + pattern: Cow::Borrowed("m.image"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -594,8 +564,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.video")), - pattern_type: None, + pattern: Cow::Borrowed("m.video"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -614,8 +583,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), // MSC3933: Type changed from template rule - see MSC. - pattern: Some(Cow::Borrowed("m.audio")), - pattern_type: None, + pattern: Cow::Borrowed("m.audio"), })), // MSC3933: Add condition on top of template rule - see MSC. Condition::Known(KnownCondition::RoomVersionSupports { @@ -633,18 +601,15 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("im.vector.modular.widgets")), - pattern_type: None, + pattern: Cow::Borrowed("im.vector.modular.widgets"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("content.type"), - pattern: Some(Cow::Borrowed("jitsi")), - pattern_type: None, + pattern: Cow::Borrowed("jitsi"), })), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("state_key"), - pattern: Some(Cow::Borrowed("*")), - pattern_type: None, + pattern: Cow::Borrowed("*"), })), ]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), @@ -660,8 +625,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ }), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"), })), ]), actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]), @@ -674,8 +638,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.start")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.start"), }, ))]), actions: Cow::Borrowed(&[Action::Notify]), @@ -691,8 +654,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ }), Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"), })), ]), actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION]), @@ -705,8 +667,7 @@ pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { key: Cow::Borrowed("type"), - pattern: Some(Cow::Borrowed("org.matrix.msc3381.poll.end")), - pattern_type: None, + pattern: Cow::Borrowed("org.matrix.msc3381.poll.end"), }, ))]), actions: Cow::Borrowed(&[Action::Notify]), diff --git a/rust/src/push/evaluator.rs 
b/rust/src/push/evaluator.rs index 55551ecb56a7..a65c645cafe5 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::borrow::Cow; use std::collections::{BTreeMap, BTreeSet}; -use crate::push::JsonValue; +use crate::push::{EventMatchPatternType, JsonValue}; use anyhow::{Context, Error}; use lazy_static::lazy_static; use log::warn; @@ -23,8 +24,8 @@ use regex::Regex; use super::{ utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType}, - Action, Condition, EventMatchCondition, ExactEventMatchCondition, FilteredPushRules, - KnownCondition, RelatedEventMatchCondition, SimpleJsonValue, + Action, Condition, ExactEventMatchCondition, FilteredPushRules, KnownCondition, + SimpleJsonValue, }; lazy_static! { @@ -256,14 +257,58 @@ impl PushRuleEvaluator { }; let result = match known_condition { - KnownCondition::EventMatch(event_match) => { - self.match_event_match(event_match, user_id)? + KnownCondition::EventMatch(event_match) => self.match_event_match( + &self.flattened_keys, + &event_match.key, + &event_match.pattern, + )?, + KnownCondition::EventMatchType(event_match) => { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id + } else { + return Ok(false); + }; + + let pattern = match &*event_match.pattern_type { + EventMatchPatternType::UserId => user_id, + EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + }; + + self.match_event_match(&self.flattened_keys, &event_match.key, pattern)? } KnownCondition::ExactEventMatch(exact_event_match) => { self.match_exact_event_match(exact_event_match)? } - KnownCondition::RelatedEventMatch(event_match) => { - self.match_related_event_match(event_match, user_id)? + KnownCondition::RelatedEventMatch(event_match) => self.match_related_event_match( + &event_match.rel_type.clone(), + event_match.include_fallbacks, + event_match.key.clone(), + event_match.pattern.clone(), + )?, + KnownCondition::RelatedEventMatchType(event_match) => { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id + } else { + return Ok(false); + }; + + let pattern = match &*event_match.pattern_type { + EventMatchPatternType::UserId => user_id, + EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + }; + + self.match_related_event_match( + &event_match.rel_type.clone(), + event_match.include_fallbacks, + Some(event_match.key.clone()), + Some(Cow::Borrowed(pattern)), + )? } KnownCondition::ExactEventPropertyContains(exact_event_match) => { self.match_exact_event_property_contains(exact_event_match)? @@ -325,32 +370,12 @@ impl PushRuleEvaluator { /// Evaluates a `event_match` condition. fn match_event_match( &self, - event_match: &EventMatchCondition, - user_id: Option<&str>, + flattened_event: &BTreeMap, + key: &str, + pattern: &str, ) -> Result { - let pattern = if let Some(pattern) = &event_match.pattern { - pattern - } else if let Some(pattern_type) = &event_match.pattern_type { - // The `pattern_type` can either be "user_id" or "user_localpart", - // either way if we don't have a `user_id` then the condition can't - // match. 
- let user_id = if let Some(user_id) = user_id { - user_id - } else { - return Ok(false); - }; - - match &**pattern_type { - "user_id" => user_id, - "user_localpart" => get_localpart_from_id(user_id)?, - _ => return Ok(false), - } - } else { - return Ok(false); - }; - let haystack = if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = - self.flattened_keys.get(&*event_match.key) + flattened_event.get(key) { haystack } else { @@ -359,7 +384,7 @@ impl PushRuleEvaluator { // For the content.body we match against "words", but for everything // else we match against the entire value. - let match_type = if event_match.key == "content.body" { + let match_type = if key == "content.body" { GlobMatchType::Word } else { GlobMatchType::Whole @@ -395,8 +420,10 @@ impl PushRuleEvaluator { /// Evaluates a `related_event_match` condition. (MSC3664) fn match_related_event_match( &self, - event_match: &RelatedEventMatchCondition, - user_id: Option<&str>, + rel_type: &str, + include_fallbacks: Option, + key: Option>, + pattern: Option>, ) -> Result { // First check if related event matching is enabled... if !self.related_event_match_enabled { @@ -404,7 +431,7 @@ impl PushRuleEvaluator { } // get the related event, fail if there is none. - let event = if let Some(event) = self.related_events_flattened.get(&*event_match.rel_type) { + let event = if let Some(event) = self.related_events_flattened.get(rel_type) { event } else { return Ok(false); @@ -412,58 +439,18 @@ impl PushRuleEvaluator { // If we are not matching fallbacks, don't match if our special key indicating this is a // fallback relation is not present. - if !event_match.include_fallbacks.unwrap_or(false) - && event.contains_key("im.vector.is_falling_back") - { + if !include_fallbacks.unwrap_or(false) && event.contains_key("im.vector.is_falling_back") { return Ok(false); } - // if we have no key, accept the event as matching, if it existed without matching any - // fields. - let key = if let Some(key) = &event_match.key { - key - } else { - return Ok(true); - }; - - let pattern = if let Some(pattern) = &event_match.pattern { - pattern - } else if let Some(pattern_type) = &event_match.pattern_type { - // The `pattern_type` can either be "user_id" or "user_localpart", - // either way if we don't have a `user_id` then the condition can't - // match. - let user_id = if let Some(user_id) = user_id { - user_id - } else { - return Ok(false); - }; - - match &**pattern_type { - "user_id" => user_id, - "user_localpart" => get_localpart_from_id(user_id)?, - _ => return Ok(false), - } - } else { - return Ok(false); - }; - - let haystack = - if let Some(JsonValue::Value(SimpleJsonValue::Str(haystack))) = event.get(&**key) { - haystack - } else { - return Ok(false); - }; - - // For the content.body we match against "words", but for everything - // else we match against the entire value. - let match_type = if key == "content.body" { - GlobMatchType::Word - } else { - GlobMatchType::Whole - }; - - let mut compiled_pattern = get_glob_matcher(pattern, match_type)?; - compiled_pattern.is_match(haystack) + match (key, pattern) { + // if we have no key, accept the event as matching. + (None, _) => Ok(true), + // There was a key, so we *must* have a pattern to go with it. + (Some(_), None) => Ok(false), + // If there is a key & pattern, check if they're in the flattened event (given by rel_type). + (Some(key), Some(pattern)) => self.match_event_match(event, &key, &pattern), + } } /// Evaluates a `exact_event_property_contains` condition. 
(MSC3758) diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index fdd2b2c1439b..97feb6efc924 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -328,10 +328,16 @@ pub enum Condition { #[serde(tag = "kind")] pub enum KnownCondition { EventMatch(EventMatchCondition), + // Identical to event_match but gives predefined patterns. Cannot be added by users. + #[serde(skip_deserializing, rename = "event_match")] + EventMatchType(EventMatchTypeCondition), #[serde(rename = "com.beeper.msc3758.exact_event_match")] ExactEventMatch(ExactEventMatchCondition), #[serde(rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatch(RelatedEventMatchCondition), + // Identical to related_event_match but gives predefined patterns. Cannot be added by users. + #[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")] + RelatedEventMatchType(RelatedEventMatchTypeCondition), #[serde(rename = "org.matrix.msc3966.exact_event_property_contains")] ExactEventPropertyContains(ExactEventMatchCondition), #[serde(rename = "org.matrix.msc3952.is_user_mention")] @@ -362,14 +368,27 @@ impl<'source> FromPyObject<'source> for Condition { } } -/// The body of a [`Condition::EventMatch`] +/// The body of a [`Condition::EventMatch`] with a pattern. #[derive(Serialize, Deserialize, Debug, Clone)] pub struct EventMatchCondition { pub key: Cow<'static, str>, - #[serde(skip_serializing_if = "Option::is_none")] - pub pattern: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - pub pattern_type: Option>, + pub pattern: Cow<'static, str>, +} + +#[derive(Serialize, Debug, Clone)] +#[serde(rename_all = "snake_case")] +pub enum EventMatchPatternType { + UserId, + UserLocalpart, +} + +/// The body of a [`Condition::EventMatch`] that uses user_id or user_localpart as a pattern. +#[derive(Serialize, Debug, Clone)] +pub struct EventMatchTypeCondition { + pub key: Cow<'static, str>, + // During serialization, the pattern_type property gets replaced with a + // pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user. + pub pattern_type: Cow<'static, EventMatchPatternType>, } /// The body of a [`Condition::ExactEventMatch`] @@ -386,8 +405,18 @@ pub struct RelatedEventMatchCondition { pub key: Option>, #[serde(skip_serializing_if = "Option::is_none")] pub pattern: Option>, + pub rel_type: Cow<'static, str>, #[serde(skip_serializing_if = "Option::is_none")] - pub pattern_type: Option>, + pub include_fallbacks: Option, +} + +/// The body of a [`Condition::RelatedEventMatch`] that uses user_id or user_localpart as a pattern. +#[derive(Serialize, Debug, Clone)] +pub struct RelatedEventMatchTypeCondition { + // This is only used if pattern_type exists (and thus key must exist), so is + // a bit simpler than RelatedEventMatchCondition. 
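+    // For example, `pattern_type: "user_id"` means the requesting user's full Matrix ID
+    // (e.g. `@alice:example.org`) becomes the glob pattern, while `"user_localpart"`
+    // uses just the localpart (`alice`); the evaluator resolves this at match time.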
+ pub key: Cow<'static, str>, + pub pattern_type: Cow<'static, EventMatchPatternType>, pub rel_type: Cow<'static, str>, #[serde(skip_serializing_if = "Option::is_none")] pub include_fallbacks: Option, @@ -571,8 +600,7 @@ impl FilteredPushRules { fn test_serialize_condition() { let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition { key: "content.body".into(), - pattern: Some("coffee".into()), - pattern_type: None, + pattern: "coffee".into(), })); let json = serde_json::to_string(&condition).unwrap(); @@ -586,7 +614,33 @@ fn test_serialize_condition() { fn test_deserialize_condition() { let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#; - let _: Condition = serde_json::from_str(json).unwrap(); + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!( + condition, + Condition::Known(KnownCondition::EventMatch(_)) + )); +} + +#[test] +fn test_serialize_event_match_condition_with_pattern_type() { + let condition = Condition::Known(KnownCondition::EventMatchType(EventMatchTypeCondition { + key: "content.body".into(), + pattern_type: Cow::Owned(EventMatchPatternType::UserId), + })); + + let json = serde_json::to_string(&condition).unwrap(); + assert_eq!( + json, + r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"# + ) +} + +#[test] +fn test_cannot_deserialize_event_match_condition_with_pattern_type() { + let json = r#"{"kind":"event_match","key":"content.body","pattern_type":"user_id"}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + assert!(matches!(condition, Condition::Unknown(_))); } #[test] @@ -600,6 +654,37 @@ fn test_deserialize_unstable_msc3664_condition() { )); } +#[test] +fn test_serialize_unstable_msc3664_condition_with_pattern_type() { + let condition = Condition::Known(KnownCondition::RelatedEventMatchType( + RelatedEventMatchTypeCondition { + key: "content.body".into(), + pattern_type: Cow::Owned(EventMatchPatternType::UserId), + rel_type: "m.in_reply_to".into(), + include_fallbacks: Some(true), + }, + )); + + let json = serde_json::to_string(&condition).unwrap(); + assert_eq!( + json, + r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to","include_fallbacks":true}"# + ) +} + +#[test] +fn test_cannot_deserialize_unstable_msc3664_condition_with_pattern_type() { + let json = r#"{"kind":"im.nheko.msc3664.related_event_match","key":"content.body","pattern_type":"user_id","rel_type":"m.in_reply_to"}"#; + + let condition: Condition = serde_json::from_str(json).unwrap(); + // Since pattern is optional on RelatedEventMatch it deserializes it to that + // instead of RelatedEventMatchType. + assert!(matches!( + condition, + Condition::Known(KnownCondition::RelatedEventMatch(_)) + )); +} + #[test] fn test_deserialize_unstable_msc3931_condition() { let json = diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 4e858fd16f33..1d30e3c3e4cf 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -401,6 +401,33 @@ def test_event_match_non_body(self) -> None: "pattern should not match before a newline", ) + def test_event_match_pattern(self) -> None: + """Check that event_match conditions do not use a "pattern_type" from user data.""" + + # The pattern_type should not be deserialized into anything valid. 
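+        # pattern_type is reserved for Synapse's own base rules: it is rewritten to a
+        # concrete pattern in synapse.push.clientformat before rules are sent to
+        # clients, so a user-supplied rule must provide a literal pattern instead.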
+ condition = { + "kind": "event_match", + "key": "content.value", + "pattern_type": "user_id", + } + self._assert_not_matches( + condition, + {"value": "@user:test"}, + "should not be possible to pass a pattern_type in", + ) + + # This is an internal-only condition which shouldn't get deserialized. + condition = { + "kind": "event_match_type", + "key": "content.value", + "pattern_type": "user_id", + } + self._assert_not_matches( + condition, + {"value": "@user:test"}, + "should not be possible to pass a pattern_type in", + ) + def test_exact_event_match_string(self) -> None: """Check that exact_event_match conditions work as expected for strings.""" From c369d82df0eac691ccb549051dd61dd77b83d1e9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 28 Feb 2023 10:17:55 -0500 Subject: [PATCH 253/344] Add missing type hints to InsecureInterceptableContextFactory. (#15164) --- changelog.d/15164.misc | 1 + mypy.ini | 3 --- synapse/http/client.py | 5 +++-- 3 files changed, 4 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15164.misc diff --git a/changelog.d/15164.misc b/changelog.d/15164.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15164.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index 94562d0bce10..572734f8e7cf 100644 --- a/mypy.ini +++ b/mypy.ini @@ -36,9 +36,6 @@ exclude = (?x) [mypy-synapse.federation.transport.client] disallow_untyped_defs = False -[mypy-synapse.http.client] -disallow_untyped_defs = False - [mypy-synapse.http.matrixfederationclient] disallow_untyped_defs = False diff --git a/synapse/http/client.py b/synapse/http/client.py index a05f297933ca..ae48e7c3f004 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -44,6 +44,7 @@ IAddress, IDelayedCall, IHostResolution, + IOpenSSLContextFactory, IReactorCore, IReactorPluggableNameResolver, IReactorTime, @@ -958,8 +959,8 @@ def __init__(self) -> None: self._context = SSL.Context(SSL.SSLv23_METHOD) self._context.set_verify(VERIFY_NONE, lambda *_: False) - def getContext(self, hostname=None, port=None): + def getContext(self) -> SSL.Context: return self._context - def creatorForNetloc(self, hostname: bytes, port: int): + def creatorForNetloc(self, hostname: bytes, port: int) -> IOpenSSLContextFactory: return self From 682d31c7023b6b7299e74bc631e4d2acc60f91ac Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 28 Feb 2023 16:37:19 +0000 Subject: [PATCH 254/344] Allow use of the `/filter` Client-Server APIs on workers. (#15134) --- changelog.d/15134.feature | 1 + docker/configure_workers_and_start.py | 1 + docs/workers.md | 1 + synapse/rest/__init__.py | 3 +-- synapse/storage/databases/main/__init__.py | 4 ++-- synapse/storage/databases/main/filtering.py | 25 +++++++++++++++++---- 6 files changed, 27 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15134.feature diff --git a/changelog.d/15134.feature b/changelog.d/15134.feature new file mode 100644 index 000000000000..0dbb30bc8f13 --- /dev/null +++ b/changelog.d/15134.feature @@ -0,0 +1 @@ +Allow use of the `/filter` Client-Server APIs on workers. 
\ No newline at end of file diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 58c62f2231f3..7f615e50663f 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -142,6 +142,7 @@ "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases", "^/_matrix/client/v1/rooms/.*/timestamp_to_event$", "^/_matrix/client/(api/v1|r0|v3|unstable)/search", + "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)", ], "shared_extra_conf": {}, "worker_extra_conf": "", diff --git a/docs/workers.md b/docs/workers.md index 2eb970ffa6a0..35a96f12a91b 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -232,6 +232,7 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$ ^/_matrix/client/v1/rooms/.*/timestamp_to_event$ ^/_matrix/client/(api/v1|r0|v3|unstable)/search$ + ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$) # Encryption requests ^/_matrix/client/(r0|v3|unstable)/keys/query$ diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 14c4e6ebbbee..c327f1504369 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -108,8 +108,7 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: if is_main_process: logout.register_servlets(hs, client_resource) sync.register_servlets(hs, client_resource) - if is_main_process: - filter.register_servlets(hs, client_resource) + filter.register_servlets(hs, client_resource) account.register_servlets(hs, client_resource) register.register_servlets(hs, client_resource) if is_main_process: diff --git a/synapse/storage/databases/main/__init__.py b/synapse/storage/databases/main/__init__.py index 837dc7646e64..dc3948c17027 100644 --- a/synapse/storage/databases/main/__init__.py +++ b/synapse/storage/databases/main/__init__.py @@ -43,7 +43,7 @@ from .event_push_actions import EventPushActionsStore from .events_bg_updates import EventsBackgroundUpdatesStore from .events_forward_extremities import EventForwardExtremitiesStore -from .filtering import FilteringStore +from .filtering import FilteringWorkerStore from .keys import KeyStore from .lock import LockStore from .media_repository import MediaRepositoryStore @@ -99,7 +99,7 @@ class DataStore( EventFederationStore, MediaRepositoryStore, RejectionsStore, - FilteringStore, + FilteringWorkerStore, PusherStore, PushRuleStore, ApplicationServiceTransactionStore, diff --git a/synapse/storage/databases/main/filtering.py b/synapse/storage/databases/main/filtering.py index 12f3b601f11c..8e57c8e5a07a 100644 --- a/synapse/storage/databases/main/filtering.py +++ b/synapse/storage/databases/main/filtering.py @@ -17,7 +17,7 @@ from canonicaljson import encode_canonical_json -from synapse.api.errors import Codes, SynapseError +from synapse.api.errors import Codes, StoreError, SynapseError from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import LoggingTransaction from synapse.types import JsonDict @@ -46,8 +46,6 @@ async def get_user_filter( return db_to_json(def_json) - -class FilteringStore(FilteringWorkerStore): async def add_user_filter(self, user_localpart: str, user_filter: JsonDict) -> int: def_json = encode_canonical_json(user_filter) @@ -79,4 +77,23 @@ def _do_txn(txn: LoggingTransaction) -> int: return filter_id - return await self.db_pool.runInteraction("add_user_filter", _do_txn) + attempts = 0 + while True: + # Try a few times. 
+ # This is technically needed if a user tries to create two filters at once, + # leading to two concurrent transactions. + # The failure case would be: + # - SELECT filter_id ... filter_json = ? → both transactions return no rows + # - SELECT MAX(filter_id) ... → both transactions return e.g. 5 + # - INSERT INTO ... → both transactions insert filter_id = 6 + # One of the transactions will commit. The other will get a unique key + # constraint violation error (IntegrityError). This is not the same as a + # serialisability violation, which would be automatically retried by + # `runInteraction`. + try: + return await self.db_pool.runInteraction("add_user_filter", _do_txn) + except self.db_pool.engine.module.IntegrityError: + attempts += 1 + + if attempts >= 5: + raise StoreError(500, "Couldn't generate a filter ID.") From 8c3fa748e6c42802c10cea0b9bcece4cf033e751 Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Tue, 28 Feb 2023 08:57:09 -0800 Subject: [PATCH 255/344] 1.78.0 --- CHANGES.md | 9 +++++++++ changelog.d/15150.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/15150.bugfix diff --git a/CHANGES.md b/CHANGES.md index f5c19bcb9763..644ef6e036ac 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.78.0 (2023-02-28) +=========================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. ([\#15150](https://github.com/matrix-org/synapse/issues/15150)) + + Synapse 1.78.0rc1 (2023-02-21) ============================== diff --git a/changelog.d/15150.bugfix b/changelog.d/15150.bugfix deleted file mode 100644 index 8668bc587f8b..000000000000 --- a/changelog.d/15150.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.76 where 5s delays would occasionally occur in deployments using workers. diff --git a/debian/changelog b/debian/changelog index f9e95ee5e2e9..0f094308c1f6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.78.0) stable; urgency=medium + + * New Synapse release 1.78.0. + + -- Synapse Packaging team Tue, 28 Feb 2023 08:56:03 -0800 + matrix-synapse-py3 (1.78.0~rc1) stable; urgency=medium * Add `matrix-org-archive-keyring` package as recommended. diff --git a/pyproject.toml b/pyproject.toml index cef7d295c131..482644e0435c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.78.0rc1" +version = "1.78.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From d62cd940cb38e706f7fadc279017b0be3f3f29a3 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 28 Feb 2023 17:11:26 +0000 Subject: [PATCH 256/344] Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. 
(#15163) --- changelog.d/15163.bugfix | 1 + synapse/rest/client/sync.py | 25 +++++++++++++-- .../storage/databases/main/account_data.py | 31 +++++++++++++++++++ tests/storage/test_account_data.py | 22 +++++++++++++ 4 files changed, 77 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15163.bugfix diff --git a/changelog.d/15163.bugfix b/changelog.d/15163.bugfix new file mode 100644 index 000000000000..7ff1cd4463cd --- /dev/null +++ b/changelog.d/15163.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. \ No newline at end of file diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index f2013faeb206..8fcb8ac3d9d4 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -16,7 +16,7 @@ from collections import defaultdict from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union -from synapse.api.constants import EduTypes, Membership, PresenceState +from synapse.api.constants import AccountDataTypes, EduTypes, Membership, PresenceState from synapse.api.errors import Codes, StoreError, SynapseError from synapse.api.filtering import FilterCollection from synapse.api.presence import UserPresenceState @@ -139,7 +139,28 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: device_id, ) - request_key = (user, timeout, since, filter_id, full_state, device_id) + # Stream position of the last ignored users account data event for this user, + # if we're initial syncing. + # We include this in the request key to invalidate an initial sync + # in the response cache once the set of ignored users has changed. + # (We filter out ignored users from timeline events, so our sync response + # is invalid once the set of ignored users changes.) + last_ignore_accdata_streampos: Optional[int] = None + if not since: + # No `since`, so this is an initial sync. + last_ignore_accdata_streampos = await self.store.get_latest_stream_id_for_global_account_data_by_type_for_user( + user.to_string(), AccountDataTypes.IGNORED_USER_LIST + ) + + request_key = ( + user, + timeout, + since, + filter_id, + full_state, + device_id, + last_ignore_accdata_streampos, + ) if filter_id is None: filter_collection = self.filtering.DEFAULT_FILTER_COLLECTION diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 95567826f27a..308d19440f63 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -237,6 +237,37 @@ async def get_global_account_data_by_type_for_user( else: return None + async def get_latest_stream_id_for_global_account_data_by_type_for_user( + self, user_id: str, data_type: str + ) -> Optional[int]: + """ + Returns: + The stream ID of the account data, + or None if there is no such account data. + """ + + def get_latest_stream_id_for_global_account_data_by_type_for_user_txn( + txn: LoggingTransaction, + ) -> Optional[int]: + sql = """ + SELECT stream_id FROM account_data + WHERE user_id = ? AND account_data_type = ? 
+ ORDER BY stream_id DESC + LIMIT 1 + """ + txn.execute(sql, (user_id, data_type)) + + row = txn.fetchone() + if row: + return row[0] + else: + return None + + return await self.db_pool.runInteraction( + "get_latest_stream_id_for_global_account_data_by_type_for_user", + get_latest_stream_id_for_global_account_data_by_type_for_user_txn, + ) + @cached(num_args=2, tree=True) async def get_account_data_for_room( self, user_id: str, room_id: str diff --git a/tests/storage/test_account_data.py b/tests/storage/test_account_data.py index 1bfd11ceae4f..b12691a9d354 100644 --- a/tests/storage/test_account_data.py +++ b/tests/storage/test_account_data.py @@ -140,3 +140,25 @@ def test_invalid_data(self) -> None: # No one ignores the user now. self.assert_ignored(self.user, set()) self.assert_ignorers("@other:test", set()) + + def test_ignoring_users_with_latest_stream_ids(self) -> None: + """Test that ignoring users updates the latest stream ID for the ignored + user list account data.""" + + def get_latest_ignore_streampos(user_id: str) -> Optional[int]: + return self.get_success( + self.store.get_latest_stream_id_for_global_account_data_by_type_for_user( + user_id, AccountDataTypes.IGNORED_USER_LIST + ) + ) + + self.assertIsNone(get_latest_ignore_streampos("@user:test")) + + self._update_ignore_list("@other:test", "@another:remote") + + self.assertEqual(get_latest_ignore_streampos("@user:test"), 2) + + # Add one user, remove one user, and leave one user. + self._update_ignore_list("@foo:test", "@another:remote") + + self.assertEqual(get_latest_ignore_streampos("@user:test"), 3) From 69553052cca6381ddd1d2996b5db28b2b505f527 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 28 Feb 2023 12:51:11 -0500 Subject: [PATCH 257/344] Update spam checker documentation for moved media modules. (#15175) --- changelog.d/15175.misc | 1 + docs/modules/spam_checker_callbacks.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15175.misc diff --git a/changelog.d/15175.misc b/changelog.d/15175.misc new file mode 100644 index 000000000000..8de5f952398f --- /dev/null +++ b/changelog.d/15175.misc @@ -0,0 +1 @@ +Refactor the media modules. diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index 50969edd46ff..1a0c6ec954ca 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -307,8 +307,8 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a ```python async def check_media_file_for_spam( - file_wrapper: "synapse.rest.media.v1.media_storage.ReadableFileWrapper", - file_info: "synapse.rest.media.v1._base.FileInfo", + file_wrapper: "synapse.media.media_storage.ReadableFileWrapper", + file_info: "synapse.media._base.FileInfo", ) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` From 2b78981736f9004f99b1760e3e77b234f92755a7 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 28 Feb 2023 18:49:28 +0000 Subject: [PATCH 258/344] Remove support for aggregating reactions (#15172) It turns out that no clients rely on server-side aggregation of `m.annotation` relationships: it's just not very useful as currently implemented. It's also non-trivial to calculate. I want to remove it from MSC2677, so to keep the implementation in line, let's remove it here. 
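For reference, the aggregation being removed is the server-generated summary that
previously appeared under `unsigned` -> `m.relations` -> `m.annotation` on the
target event, roughly of this shape (illustrative values only, taken from the
tests deleted below):

    {
        "chunk": [
            {"type": "m.reaction", "key": "a", "count": 2},
            {"type": "m.reaction", "key": "b", "count": 1}
        ]
    }

Clients that want reaction counts now have to derive them from the individual
`m.reaction` events (e.g. via `/relations`) rather than reading this summary.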
--- changelog.d/15172.feature | 1 + synapse/events/utils.py | 5 - synapse/handlers/relations.py | 76 +------- synapse/storage/databases/main/cache.py | 3 - synapse/storage/databases/main/events.py | 4 - .../databases/main/events_bg_updates.py | 3 - synapse/storage/databases/main/relations.py | 137 -------------- tests/rest/client/test_relations.py | 178 +++--------------- 8 files changed, 30 insertions(+), 377 deletions(-) create mode 100644 changelog.d/15172.feature diff --git a/changelog.d/15172.feature b/changelog.d/15172.feature new file mode 100644 index 000000000000..3f789edb7f25 --- /dev/null +++ b/changelog.d/15172.feature @@ -0,0 +1 @@ +Remove support for server-side aggregation of reactions. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index ebf8c7ed83ba..eaa6cad4afa8 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -516,11 +516,6 @@ def _inject_bundled_aggregations( # being serialized. serialized_aggregations = {} - if event_aggregations.annotations: - serialized_aggregations[ - RelationTypes.ANNOTATION - ] = event_aggregations.annotations - if event_aggregations.references: serialized_aggregations[ RelationTypes.REFERENCE diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 0fb15391e07e..553053b69469 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -60,13 +60,12 @@ class BundledAggregations: Some values require additional processing during serialization. """ - annotations: Optional[JsonDict] = None references: Optional[JsonDict] = None replace: Optional[EventBase] = None thread: Optional[_ThreadAggregation] = None def __bool__(self) -> bool: - return bool(self.annotations or self.references or self.replace or self.thread) + return bool(self.references or self.replace or self.thread) class RelationsHandler: @@ -227,67 +226,6 @@ async def redact_events_related_to( e.msg, ) - async def get_annotations_for_events( - self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() - ) -> Dict[str, List[JsonDict]]: - """Get a list of annotations to the given events, grouped by event type and - aggregation key, sorted by count. - - This is used e.g. to get the what and how many reactions have happened - on an event. - - Args: - event_ids: Fetch events that relate to these event IDs. - ignored_users: The users ignored by the requesting user. - - Returns: - A map of event IDs to a list of groups of annotations that match. - Each entry is a dict with `type`, `key` and `count` fields. - """ - # Get the base results for all users. - full_results = await self._main_store.get_aggregation_groups_for_events( - event_ids - ) - - # Avoid additional logic if there are no ignored users. - if not ignored_users: - return { - event_id: results - for event_id, results in full_results.items() - if results - } - - # Then subtract off the results for any ignored users. - ignored_results = await self._main_store.get_aggregation_groups_for_users( - [event_id for event_id, results in full_results.items() if results], - ignored_users, - ) - - filtered_results = {} - for event_id, results in full_results.items(): - # If no annotations, skip. - if not results: - continue - - # If there are not ignored results for this event, copy verbatim. - if event_id not in ignored_results: - filtered_results[event_id] = results - continue - - # Otherwise, subtract out the ignored results. 
- event_ignored_results = ignored_results[event_id] - for result in results: - key = (result["type"], result["key"]) - if key in event_ignored_results: - # Ensure to not modify the cache. - result = result.copy() - result["count"] -= event_ignored_results[key] - if result["count"] <= 0: - continue - filtered_results.setdefault(event_id, []).append(result) - - return filtered_results - async def get_references_for_events( self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() ) -> Dict[str, List[_RelatedEvent]]: @@ -531,17 +469,6 @@ async def get_bundled_aggregations( # (as that is what makes it part of the thread). relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD - async def _fetch_annotations() -> None: - """Fetch any annotations (ie, reactions) to bundle with this event.""" - annotations_by_event_id = await self.get_annotations_for_events( - events_by_id.keys(), ignored_users=ignored_users - ) - for event_id, annotations in annotations_by_event_id.items(): - if annotations: - results.setdefault(event_id, BundledAggregations()).annotations = { - "chunk": annotations - } - async def _fetch_references() -> None: """Fetch any references to bundle with this event.""" references_by_event_id = await self.get_references_for_events( @@ -575,7 +502,6 @@ async def _fetch_edits() -> None: await make_deferred_yieldable( gather_results( ( - run_in_background(_fetch_annotations), run_in_background(_fetch_references), run_in_background(_fetch_edits), ) diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 5b6643169139..096dec7f876e 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -266,9 +266,6 @@ def _invalidate_caches_for_event( if relates_to: self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,)) self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,)) - self._attempt_to_invalidate_cache( - "get_aggregation_groups_for_event", (relates_to,) - ) self._attempt_to_invalidate_cache("get_applicable_edit", (relates_to,)) self._attempt_to_invalidate_cache("get_thread_summary", (relates_to,)) self._attempt_to_invalidate_cache("get_thread_participated", (relates_to,)) diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index 73b8aea16cc7..a8a4ed4436f6 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -2024,10 +2024,6 @@ def _handle_redact_relations( self.store._invalidate_cache_and_stream( txn, self.store.get_relations_for_event, (redacted_relates_to,) ) - if rel_type == RelationTypes.ANNOTATION: - self.store._invalidate_cache_and_stream( - txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,) - ) if rel_type == RelationTypes.REFERENCE: self.store._invalidate_cache_and_stream( txn, self.store.get_references_for_event, (redacted_relates_to,) diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index 0a275e6ce665..daef3685b09a 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -1219,9 +1219,6 @@ def _event_arbitrary_relations_txn(txn: LoggingTransaction) -> int: self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_relations_for_event, cache_tuple # type: ignore[attr-defined] ) - self._invalidate_cache_and_stream( # type: ignore[attr-defined] - txn, 
self.get_aggregation_groups_for_event, cache_tuple # type: ignore[attr-defined] - ) self._invalidate_cache_and_stream( # type: ignore[attr-defined] txn, self.get_thread_summary, cache_tuple # type: ignore[attr-defined] ) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index fa3266c081b5..bc3a83919c34 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -397,143 +397,6 @@ async def event_is_target_of_relation(self, parent_id: str) -> bool: ) return result is not None - @cached() - async def get_aggregation_groups_for_event( - self, event_id: str - ) -> Sequence[JsonDict]: - raise NotImplementedError() - - @cachedList( - cached_method_name="get_aggregation_groups_for_event", list_name="event_ids" - ) - async def get_aggregation_groups_for_events( - self, event_ids: Collection[str] - ) -> Mapping[str, Optional[List[JsonDict]]]: - """Get a list of annotations on the given events, grouped by event type and - aggregation key, sorted by count. - - This is used e.g. to get the what and how many reactions have happend - on an event. - - Args: - event_ids: Fetch events that relate to these event IDs. - - Returns: - A map of event IDs to a list of groups of annotations that match. - Each entry is a dict with `type`, `key` and `count` fields. - """ - # The number of entries to return per event ID. - limit = 5 - - clause, args = make_in_list_sql_clause( - self.database_engine, "relates_to_id", event_ids - ) - args.append(RelationTypes.ANNOTATION) - - sql = f""" - SELECT - relates_to_id, - annotation.type, - aggregation_key, - COUNT(DISTINCT annotation.sender) - FROM events AS annotation - INNER JOIN event_relations USING (event_id) - INNER JOIN events AS parent ON - parent.event_id = relates_to_id - AND parent.room_id = annotation.room_id - WHERE - {clause} - AND relation_type = ? - GROUP BY relates_to_id, annotation.type, aggregation_key - ORDER BY relates_to_id, COUNT(*) DESC - """ - - def _get_aggregation_groups_for_events_txn( - txn: LoggingTransaction, - ) -> Mapping[str, List[JsonDict]]: - txn.execute(sql, args) - - result: Dict[str, List[JsonDict]] = {} - for event_id, type, key, count in cast( - List[Tuple[str, str, str, int]], txn - ): - event_results = result.setdefault(event_id, []) - - # Limit the number of results per event ID. - if len(event_results) == limit: - continue - - event_results.append({"type": type, "key": key, "count": count}) - - return result - - return await self.db_pool.runInteraction( - "get_aggregation_groups_for_events", _get_aggregation_groups_for_events_txn - ) - - async def get_aggregation_groups_for_users( - self, event_ids: Collection[str], users: FrozenSet[str] - ) -> Dict[str, Dict[Tuple[str, str], int]]: - """Fetch the partial aggregations for an event for specific users. - - This is used, in conjunction with get_aggregation_groups_for_event, to - remove information from the results for ignored users. - - Args: - event_ids: Fetch events that relate to these event IDs. - users: The users to fetch information for. - - Returns: - A map of event ID to a map of (event type, aggregation key) to a - count of users. 
- """ - - if not users: - return {} - - events_sql, args = make_in_list_sql_clause( - self.database_engine, "relates_to_id", event_ids - ) - - users_sql, users_args = make_in_list_sql_clause( - self.database_engine, "annotation.sender", users - ) - args.extend(users_args) - args.append(RelationTypes.ANNOTATION) - - sql = f""" - SELECT - relates_to_id, - annotation.type, - aggregation_key, - COUNT(DISTINCT annotation.sender) - FROM events AS annotation - INNER JOIN event_relations USING (event_id) - INNER JOIN events AS parent ON - parent.event_id = relates_to_id - AND parent.room_id = annotation.room_id - WHERE {events_sql} AND {users_sql} AND relation_type = ? - GROUP BY relates_to_id, annotation.type, aggregation_key - ORDER BY relates_to_id, COUNT(*) DESC - """ - - def _get_aggregation_groups_for_users_txn( - txn: LoggingTransaction, - ) -> Dict[str, Dict[Tuple[str, str], int]]: - txn.execute(sql, args) - - result: Dict[str, Dict[Tuple[str, str], int]] = {} - for event_id, type, key, count in cast( - List[Tuple[str, str, str, int]], txn - ): - result.setdefault(event_id, {})[(type, key)] = count - - return result - - return await self.db_pool.runInteraction( - "get_aggregation_groups_for_users", _get_aggregation_groups_for_users_txn - ) - @cached() async def get_references_for_event(self, event_id: str) -> List[JsonDict]: raise NotImplementedError() diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index c8a6911d5ee5..a8a0a1614104 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -1080,48 +1080,6 @@ def assert_bundle(event_json: JsonDict) -> None: ] assert_bundle(self._find_event_in_chunk(chunk)) - def test_annotation(self) -> None: - """ - Test that annotations get correctly bundled. - """ - # Setup by sending a variety of relations. - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "b") - - def assert_annotations(bundled_aggregations: JsonDict) -> None: - self.assertEqual( - { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - ] - }, - bundled_aggregations, - ) - - self._test_bundled_aggregations(RelationTypes.ANNOTATION, assert_annotations, 7) - - def test_annotation_to_annotation(self) -> None: - """Any relation to an annotation should be ignored.""" - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - event_id = channel.json_body["event_id"] - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "b", parent_id=event_id - ) - - # Fetch the initial annotation event to see if it has bundled aggregations. - channel = self.make_request( - "GET", - f"/_matrix/client/v3/rooms/{self.room}/event/{event_id}", - access_token=self.user_token, - ) - self.assertEquals(200, channel.code, channel.json_body) - # The first annotationt should not have any bundled aggregations. - self.assertNotIn("m.relations", channel.json_body["unsigned"]) - def test_reference(self) -> None: """ Test that references get correctly bundled. 
@@ -1138,7 +1096,7 @@ def assert_annotations(bundled_aggregations: JsonDict) -> None: bundled_aggregations, ) - self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 7) + self._test_bundled_aggregations(RelationTypes.REFERENCE, assert_annotations, 6) def test_thread(self) -> None: """ @@ -1183,7 +1141,7 @@ def assert_thread(bundled_aggregations: JsonDict) -> None: # The "user" sent the root event and is making queries for the bundled # aggregations: they have participated. - self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 7) + self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 6) # The "user2" sent replies in the thread and is making queries for the # bundled aggregations: they have participated. # @@ -1208,9 +1166,10 @@ def test_thread_with_bundled_aggregations_for_latest(self) -> None: channel = self._send_relation(RelationTypes.THREAD, "m.room.test") thread_2 = channel.json_body["event_id"] - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_2 + channel = self._send_relation( + RelationTypes.REFERENCE, "org.matrix.test", parent_id=thread_2 ) + reference_event_id = channel.json_body["event_id"] def assert_thread(bundled_aggregations: JsonDict) -> None: self.assertEqual(2, bundled_aggregations.get("count")) @@ -1235,17 +1194,15 @@ def assert_thread(bundled_aggregations: JsonDict) -> None: self.assert_dict( { "m.relations": { - RelationTypes.ANNOTATION: { - "chunk": [ - {"type": "m.reaction", "key": "a", "count": 1}, - ] + RelationTypes.REFERENCE: { + "chunk": [{"event_id": reference_event_id}] }, } }, bundled_aggregations["latest_event"].get("unsigned"), ) - self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 7) + self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 6) def test_nested_thread(self) -> None: """ @@ -1363,10 +1320,11 @@ def test_aggregation_get_event_for_thread(self) -> None: channel = self._send_relation(RelationTypes.THREAD, "m.room.test") thread_id = channel.json_body["event_id"] - # Annotate the thread. - self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", parent_id=thread_id + # Make a reference to the thread. + channel = self._send_relation( + RelationTypes.REFERENCE, "org.matrix.test", parent_id=thread_id ) + reference_event_id = channel.json_body["event_id"] channel = self.make_request( "GET", @@ -1377,9 +1335,7 @@ def test_aggregation_get_event_for_thread(self) -> None: self.assertEqual( channel.json_body["unsigned"].get("m.relations"), { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, + RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]}, }, ) @@ -1396,9 +1352,7 @@ def test_aggregation_get_event_for_thread(self) -> None: self.assertEqual( thread_message["unsigned"].get("m.relations"), { - RelationTypes.ANNOTATION: { - "chunk": [{"count": 1, "key": "a", "type": "m.reaction"}] - }, + RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]}, }, ) @@ -1410,7 +1364,8 @@ def test_bundled_aggregations_with_filter(self) -> None: Note that the spec allows for a server to return additional fields beyond what is specified. """ - self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") + channel = self._send_relation(RelationTypes.REFERENCE, "org.matrix.test") + reference_event_id = channel.json_body["event_id"] # Note that the sync filter does not include "unsigned" as a field. 
filter = urllib.parse.quote_plus( @@ -1428,7 +1383,12 @@ def test_bundled_aggregations_with_filter(self) -> None: # Ensure there's bundled aggregations on it. self.assertIn("unsigned", parent_event) - self.assertIn("m.relations", parent_event["unsigned"]) + self.assertEqual( + parent_event["unsigned"].get("m.relations"), + { + RelationTypes.REFERENCE: {"chunk": [{"event_id": reference_event_id}]}, + }, + ) class RelationIgnoredUserTestCase(BaseRelationsTestCase): @@ -1475,53 +1435,8 @@ def _test_ignored_user( return before_aggregations[relation_type], after_aggregations[relation_type] - def test_annotation(self) -> None: - """Annotations should ignore""" - # Send 2 from us, 2 from the to be ignored user. - allowed_event_ids = [] - ignored_event_ids = [] - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="a") - allowed_event_ids.append(channel.json_body["event_id"]) - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="b") - allowed_event_ids.append(channel.json_body["event_id"]) - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key="a", - access_token=self.user2_token, - ) - ignored_event_ids.append(channel.json_body["event_id"]) - channel = self._send_relation( - RelationTypes.ANNOTATION, - "m.reaction", - key="c", - access_token=self.user2_token, - ) - ignored_event_ids.append(channel.json_body["event_id"]) - - before_aggregations, after_aggregations = self._test_ignored_user( - RelationTypes.ANNOTATION, allowed_event_ids, ignored_event_ids - ) - - self.assertCountEqual( - before_aggregations["chunk"], - [ - {"type": "m.reaction", "key": "a", "count": 2}, - {"type": "m.reaction", "key": "b", "count": 1}, - {"type": "m.reaction", "key": "c", "count": 1}, - ], - ) - - self.assertCountEqual( - after_aggregations["chunk"], - [ - {"type": "m.reaction", "key": "a", "count": 1}, - {"type": "m.reaction", "key": "b", "count": 1}, - ], - ) - def test_reference(self) -> None: - """Annotations should ignore""" + """Aggregations should exclude reference relations from ignored users""" channel = self._send_relation(RelationTypes.REFERENCE, "m.room.test") allowed_event_ids = [channel.json_body["event_id"]] @@ -1544,7 +1459,7 @@ def test_reference(self) -> None: ) def test_thread(self) -> None: - """Annotations should ignore""" + """Aggregations should exclude thread releations from ignored users""" channel = self._send_relation(RelationTypes.THREAD, "m.room.test") allowed_event_ids = [channel.json_body["event_id"]] @@ -1618,43 +1533,6 @@ def _get_threads(self) -> List[Tuple[str, str]]: for t in threads ] - def test_redact_relation_annotation(self) -> None: - """ - Test that annotations of an event are properly handled after the - annotation is redacted. - - The redacted relation should not be included in bundled aggregations or - the response to relations. - """ - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "a") - to_redact_event_id = channel.json_body["event_id"] - - channel = self._send_relation( - RelationTypes.ANNOTATION, "m.reaction", "a", access_token=self.user2_token - ) - unredacted_event_id = channel.json_body["event_id"] - - # Both relations should exist. - event_ids = self._get_related_events() - relations = self._get_bundled_aggregations() - self.assertCountEqual(event_ids, [to_redact_event_id, unredacted_event_id]) - self.assertEquals( - relations["m.annotation"], - {"chunk": [{"type": "m.reaction", "key": "a", "count": 2}]}, - ) - - # Redact one of the reactions. 
- self._redact(to_redact_event_id) - - # The unredacted relation should still exist. - event_ids = self._get_related_events() - relations = self._get_bundled_aggregations() - self.assertEquals(event_ids, [unredacted_event_id]) - self.assertEquals( - relations["m.annotation"], - {"chunk": [{"type": "m.reaction", "key": "a", "count": 1}]}, - ) - def test_redact_relation_thread(self) -> None: """ Test that thread replies are properly handled after the thread reply redacted. @@ -1775,14 +1653,14 @@ def test_redact_parent_annotation(self) -> None: is redacted. """ # Add a relation - channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", key="👍") + channel = self._send_relation(RelationTypes.REFERENCE, "org.matrix.test") related_event_id = channel.json_body["event_id"] # The relations should exist. event_ids = self._get_related_events() relations = self._get_bundled_aggregations() self.assertEqual(len(event_ids), 1) - self.assertIn(RelationTypes.ANNOTATION, relations) + self.assertIn(RelationTypes.REFERENCE, relations) # Redact the original event. self._redact(self.parent_id) @@ -1792,8 +1670,8 @@ def test_redact_parent_annotation(self) -> None: relations = self._get_bundled_aggregations() self.assertEquals(event_ids, [related_event_id]) self.assertEquals( - relations["m.annotation"], - {"chunk": [{"type": "m.reaction", "key": "👍", "count": 1}]}, + relations[RelationTypes.REFERENCE], + {"chunk": [{"event_id": related_event_id}]}, ) def test_redact_parent_thread(self) -> None: From 916b8061d20dc0902b7f2d42d994efc20300e9e7 Mon Sep 17 00:00:00 2001 From: Hugh Nimmo-Smith Date: Thu, 2 Mar 2023 10:34:59 +0000 Subject: [PATCH 259/344] Implementation of MSC3967: Don't require UIA for initial upload of cross signing keys (#15077) --- changelog.d/15077.feature | 1 + synapse/config/experimental.py | 3 + synapse/handlers/e2e_keys.py | 14 ++++ synapse/rest/client/keys.py | 32 +++++--- tests/rest/client/test_keys.py | 141 +++++++++++++++++++++++++++++++++ 5 files changed, 182 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15077.feature diff --git a/changelog.d/15077.feature b/changelog.d/15077.feature new file mode 100644 index 000000000000..384e751056b7 --- /dev/null +++ b/changelog.d/15077.feature @@ -0,0 +1 @@ +Experimental support for MSC3967 to not require UIA for setting up cross-signing on first use. 
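(For reference, this behaviour is opt-in: it only takes effect when the experimental
flag is enabled in the homeserver configuration, along the lines of

    experimental_features:
      msc3967_enabled: true

as exercised by the `override_config` decorators in the tests below.)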
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index bc38fae0b6d1..7c81f055b60f 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -194,3 +194,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.msc3966_exact_event_property_contains = experimental.get( "msc3966_exact_event_property_contains", False ) + + # MSC3967: Do not require UIA when first uploading cross signing keys + self.msc3967_enabled = experimental.get("msc3967_enabled", False) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 43cbece21b69..4e9c8d8db0c7 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -1301,6 +1301,20 @@ async def _retrieve_cross_signing_keys_for_remote_user( return desired_key_data + async def is_cross_signing_set_up_for_user(self, user_id: str) -> bool: + """Checks if the user has cross-signing set up + + Args: + user_id: The user to check + + Returns: + True if the user has cross-signing set up, False otherwise + """ + existing_master_key = await self.store.get_e2e_cross_signing_key( + user_id, "master" + ) + return existing_master_key is not None + def _check_cross_signing_key( key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None diff --git a/synapse/rest/client/keys.py b/synapse/rest/client/keys.py index 7873b363c06c..32bb8b9a91a6 100644 --- a/synapse/rest/client/keys.py +++ b/synapse/rest/client/keys.py @@ -312,15 +312,29 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: user_id = requester.user.to_string() body = parse_json_object_from_request(request) - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body, - "add a device signing key to your account", - # Allow skipping of UI auth since this is frequently called directly - # after login and it is silly to ask users to re-auth immediately. - can_skip_ui_auth=True, - ) + if self.hs.config.experimental.msc3967_enabled: + if await self.e2e_keys_handler.is_cross_signing_set_up_for_user(user_id): + # If we already have a master key then cross signing is set up and we require UIA to reset + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body, + "reset the device signing key on your account", + # Do not allow skipping of UIA auth. + can_skip_ui_auth=False, + ) + # Otherwise we don't require UIA since we are setting up cross signing for first time + else: + # Previous behaviour is to always require UIA but allow it to be skipped + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body, + "add a device signing key to your account", + # Allow skipping of UI auth since this is frequently called directly + # after login and it is silly to ask users to re-auth immediately. 
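+                # "Skipping" here means reusing a recent successful auth within the
+                # grace period configured by `ui_auth.session_timeout`.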
+ can_skip_ui_auth=True, + ) result = await self.e2e_keys_handler.upload_signing_keys_for_user(user_id, body) return 200, result diff --git a/tests/rest/client/test_keys.py b/tests/rest/client/test_keys.py index 741fecea7713..8ee548905704 100644 --- a/tests/rest/client/test_keys.py +++ b/tests/rest/client/test_keys.py @@ -14,12 +14,21 @@ from http import HTTPStatus +from signedjson.key import ( + encode_verify_key_base64, + generate_signing_key, + get_verify_key, +) +from signedjson.sign import sign_json + from synapse.api.errors import Codes from synapse.rest import admin from synapse.rest.client import keys, login +from synapse.types import JsonDict from tests import unittest from tests.http.server._base import make_request_with_cancellation_test +from tests.unittest import override_config class KeyQueryTestCase(unittest.HomeserverTestCase): @@ -118,3 +127,135 @@ def test_key_query_cancellation(self) -> None: self.assertEqual(200, channel.code, msg=channel.result["body"]) self.assertIn(bob, channel.json_body["device_keys"]) + + def make_device_keys(self, user_id: str, device_id: str) -> JsonDict: + # We only generate a master key to simplify the test. + master_signing_key = generate_signing_key(device_id) + master_verify_key = encode_verify_key_base64(get_verify_key(master_signing_key)) + + return { + "master_key": sign_json( + { + "user_id": user_id, + "usage": ["master"], + "keys": {"ed25519:" + master_verify_key: master_verify_key}, + }, + user_id, + master_signing_key, + ), + } + + def test_device_signing_with_uia(self) -> None: + """Device signing key upload requires UIA.""" + password = "wonderland" + device_id = "ABCDEFGHI" + alice_id = self.register_user("alice", password) + alice_token = self.login("alice", password, device_id=device_id) + + content = self.make_device_keys(alice_id, device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + content, + alice_token, + ) + + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + # Grab the session + session = channel.json_body["session"] + # Ensure that flows are what is expected. 
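+        # The 401 body advertises which UIA flows can authorise this request; for a
+        # password-registered user that is a single m.login.password stage.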
+ self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) + + # add UI auth + content["auth"] = { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": alice_id}, + "password": password, + "session": session, + } + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + content, + alice_token, + ) + + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + @override_config({"ui_auth": {"session_timeout": "15m"}}) + def test_device_signing_with_uia_session_timeout(self) -> None: + """Device signing key upload requires UIA buy passes with grace period.""" + password = "wonderland" + device_id = "ABCDEFGHI" + alice_id = self.register_user("alice", password) + alice_token = self.login("alice", password, device_id=device_id) + + content = self.make_device_keys(alice_id, device_id) + + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + content, + alice_token, + ) + + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + @override_config( + { + "experimental_features": {"msc3967_enabled": True}, + "ui_auth": {"session_timeout": "15s"}, + } + ) + def test_device_signing_with_msc3967(self) -> None: + """Device signing key follows MSC3967 behaviour when enabled.""" + password = "wonderland" + device_id = "ABCDEFGHI" + alice_id = self.register_user("alice", password) + alice_token = self.login("alice", password, device_id=device_id) + + keys1 = self.make_device_keys(alice_id, device_id) + + # Initial request should succeed as no existing keys are present. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys1, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + keys2 = self.make_device_keys(alice_id, device_id) + + # Subsequent request should require UIA as keys already exist even though session_timeout is set. + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys2, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.UNAUTHORIZED, channel.result) + + # Grab the session + session = channel.json_body["session"] + # Ensure that flows are what is expected. + self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) + + # add UI auth + keys2["auth"] = { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": alice_id}, + "password": password, + "session": session, + } + + # Request should complete + channel = self.make_request( + "POST", + "/_matrix/client/v3/keys/device_signing/upload", + keys2, + alice_token, + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) From 65f10afb64127dc9412e24860c5e8a78f3dc9863 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 2 Mar 2023 11:38:46 +0100 Subject: [PATCH 260/344] Move event_reports to `RoomWorkerStore` (#15165) --- changelog.d/15165.misc | 1 + synapse/storage/databases/main/room.py | 354 ++++++++++++------------- 2 files changed, 178 insertions(+), 177 deletions(-) create mode 100644 changelog.d/15165.misc diff --git a/changelog.d/15165.misc b/changelog.d/15165.misc new file mode 100644 index 000000000000..a75be84dac7f --- /dev/null +++ b/changelog.d/15165.misc @@ -0,0 +1 @@ +Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. 
\ No newline at end of file diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index a2e9519cb6c4..3825bd60797c 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1417,6 +1417,183 @@ def get_un_partial_stated_rooms_from_stream_txn( get_un_partial_stated_rooms_from_stream_txn, ) + async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: + """Retrieve an event report + + Args: + report_id: ID of reported event in database + Returns: + JSON dict of information from an event report or None if the + report does not exist. + """ + + def _get_event_report_txn( + txn: LoggingTransaction, report_id: int + ) -> Optional[Dict[str, Any]]: + sql = """ + SELECT + er.id, + er.received_ts, + er.room_id, + er.event_id, + er.user_id, + er.content, + events.sender, + room_stats_state.canonical_alias, + room_stats_state.name, + event_json.json AS event_json + FROM event_reports AS er + LEFT JOIN events + ON events.event_id = er.event_id + JOIN event_json + ON event_json.event_id = er.event_id + JOIN room_stats_state + ON room_stats_state.room_id = er.room_id + WHERE er.id = ? + """ + + txn.execute(sql, [report_id]) + row = txn.fetchone() + + if not row: + return None + + event_report = { + "id": row[0], + "received_ts": row[1], + "room_id": row[2], + "event_id": row[3], + "user_id": row[4], + "score": db_to_json(row[5]).get("score"), + "reason": db_to_json(row[5]).get("reason"), + "sender": row[6], + "canonical_alias": row[7], + "name": row[8], + "event_json": db_to_json(row[9]), + } + + return event_report + + return await self.db_pool.runInteraction( + "get_event_report", _get_event_report_txn, report_id + ) + + async def get_event_reports_paginate( + self, + start: int, + limit: int, + direction: Direction = Direction.BACKWARDS, + user_id: Optional[str] = None, + room_id: Optional[str] = None, + ) -> Tuple[List[Dict[str, Any]], int]: + """Retrieve a paginated list of event reports + + Args: + start: event offset to begin the query from + limit: number of rows to retrieve + direction: Whether to fetch the most recent first (backwards) or the + oldest first (forwards) + user_id: search for user_id. Ignored if user_id is None + room_id: search for room_id. Ignored if room_id is None + Returns: + Tuple of: + json list of event reports + total number of event reports matching the filter criteria + """ + + def _get_event_reports_paginate_txn( + txn: LoggingTransaction, + ) -> Tuple[List[Dict[str, Any]], int]: + filters = [] + args: List[object] = [] + + if user_id: + filters.append("er.user_id LIKE ?") + args.extend(["%" + user_id + "%"]) + if room_id: + filters.append("er.room_id LIKE ?") + args.extend(["%" + room_id + "%"]) + + if direction == Direction.BACKWARDS: + order = "DESC" + else: + order = "ASC" + + where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" + + # We join on room_stats_state despite not using any columns from it + # because the join can influence the number of rows returned; + # e.g. a room that doesn't have state, maybe because it was deleted. + # The query returning the total count should be consistent with + # the query returning the results. 
+ sql = """ + SELECT COUNT(*) as total_event_reports + FROM event_reports AS er + JOIN room_stats_state ON room_stats_state.room_id = er.room_id + {} + """.format( + where_clause + ) + txn.execute(sql, args) + count = cast(Tuple[int], txn.fetchone())[0] + + sql = """ + SELECT + er.id, + er.received_ts, + er.room_id, + er.event_id, + er.user_id, + er.content, + events.sender, + room_stats_state.canonical_alias, + room_stats_state.name + FROM event_reports AS er + LEFT JOIN events + ON events.event_id = er.event_id + JOIN room_stats_state + ON room_stats_state.room_id = er.room_id + {where_clause} + ORDER BY er.received_ts {order} + LIMIT ? + OFFSET ? + """.format( + where_clause=where_clause, + order=order, + ) + + args += [limit, start] + txn.execute(sql, args) + + event_reports = [] + for row in txn: + try: + s = db_to_json(row[5]).get("score") + r = db_to_json(row[5]).get("reason") + except Exception: + logger.error("Unable to parse json from event_reports: %s", row[0]) + continue + event_reports.append( + { + "id": row[0], + "received_ts": row[1], + "room_id": row[2], + "event_id": row[3], + "user_id": row[4], + "score": s, + "reason": r, + "sender": row[6], + "canonical_alias": row[7], + "name": row[8], + } + ) + + return event_reports, count + + return await self.db_pool.runInteraction( + "get_event_reports_paginate", _get_event_reports_paginate_txn + ) + async def delete_event_report(self, report_id: int) -> bool: """Remove an event report from database. @@ -2189,183 +2366,6 @@ async def add_event_report( ) return next_id - async def get_event_report(self, report_id: int) -> Optional[Dict[str, Any]]: - """Retrieve an event report - - Args: - report_id: ID of reported event in database - Returns: - JSON dict of information from an event report or None if the - report does not exist. - """ - - def _get_event_report_txn( - txn: LoggingTransaction, report_id: int - ) -> Optional[Dict[str, Any]]: - sql = """ - SELECT - er.id, - er.received_ts, - er.room_id, - er.event_id, - er.user_id, - er.content, - events.sender, - room_stats_state.canonical_alias, - room_stats_state.name, - event_json.json AS event_json - FROM event_reports AS er - LEFT JOIN events - ON events.event_id = er.event_id - JOIN event_json - ON event_json.event_id = er.event_id - JOIN room_stats_state - ON room_stats_state.room_id = er.room_id - WHERE er.id = ? - """ - - txn.execute(sql, [report_id]) - row = txn.fetchone() - - if not row: - return None - - event_report = { - "id": row[0], - "received_ts": row[1], - "room_id": row[2], - "event_id": row[3], - "user_id": row[4], - "score": db_to_json(row[5]).get("score"), - "reason": db_to_json(row[5]).get("reason"), - "sender": row[6], - "canonical_alias": row[7], - "name": row[8], - "event_json": db_to_json(row[9]), - } - - return event_report - - return await self.db_pool.runInteraction( - "get_event_report", _get_event_report_txn, report_id - ) - - async def get_event_reports_paginate( - self, - start: int, - limit: int, - direction: Direction = Direction.BACKWARDS, - user_id: Optional[str] = None, - room_id: Optional[str] = None, - ) -> Tuple[List[Dict[str, Any]], int]: - """Retrieve a paginated list of event reports - - Args: - start: event offset to begin the query from - limit: number of rows to retrieve - direction: Whether to fetch the most recent first (backwards) or the - oldest first (forwards) - user_id: search for user_id. Ignored if user_id is None - room_id: search for room_id. 
Ignored if room_id is None - Returns: - Tuple of: - json list of event reports - total number of event reports matching the filter criteria - """ - - def _get_event_reports_paginate_txn( - txn: LoggingTransaction, - ) -> Tuple[List[Dict[str, Any]], int]: - filters = [] - args: List[object] = [] - - if user_id: - filters.append("er.user_id LIKE ?") - args.extend(["%" + user_id + "%"]) - if room_id: - filters.append("er.room_id LIKE ?") - args.extend(["%" + room_id + "%"]) - - if direction == Direction.BACKWARDS: - order = "DESC" - else: - order = "ASC" - - where_clause = "WHERE " + " AND ".join(filters) if len(filters) > 0 else "" - - # We join on room_stats_state despite not using any columns from it - # because the join can influence the number of rows returned; - # e.g. a room that doesn't have state, maybe because it was deleted. - # The query returning the total count should be consistent with - # the query returning the results. - sql = """ - SELECT COUNT(*) as total_event_reports - FROM event_reports AS er - JOIN room_stats_state ON room_stats_state.room_id = er.room_id - {} - """.format( - where_clause - ) - txn.execute(sql, args) - count = cast(Tuple[int], txn.fetchone())[0] - - sql = """ - SELECT - er.id, - er.received_ts, - er.room_id, - er.event_id, - er.user_id, - er.content, - events.sender, - room_stats_state.canonical_alias, - room_stats_state.name - FROM event_reports AS er - LEFT JOIN events - ON events.event_id = er.event_id - JOIN room_stats_state - ON room_stats_state.room_id = er.room_id - {where_clause} - ORDER BY er.received_ts {order} - LIMIT ? - OFFSET ? - """.format( - where_clause=where_clause, - order=order, - ) - - args += [limit, start] - txn.execute(sql, args) - - event_reports = [] - for row in txn: - try: - s = db_to_json(row[5]).get("score") - r = db_to_json(row[5]).get("reason") - except Exception: - logger.error("Unable to parse json from event_reports: %s", row[0]) - continue - event_reports.append( - { - "id": row[0], - "received_ts": row[1], - "room_id": row[2], - "event_id": row[3], - "user_id": row[4], - "score": s, - "reason": r, - "sender": row[6], - "canonical_alias": row[7], - "name": row[8], - } - ) - - return event_reports, count - - return await self.db_pool.runInteraction( - "get_event_reports_paginate", _get_event_reports_paginate_txn - ) - async def block_room(self, room_id: str, user_id: str) -> None: """Marks the room as blocked. 
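A usage sketch (illustrative only: the admin API code that calls these methods is not part of this patch, and `store` stands in for a `RoomWorkerStore` instance):

    reports, total = await store.get_event_reports_paginate(
        start=0,
        limit=10,
        direction=Direction.BACKWARDS,
        user_id=None,  # optional substring filter on the reporter's user ID
        room_id=None,  # optional substring filter on the room ID
    )

Both methods are moved verbatim; only the class they are defined on changes, which is what allows worker processes to serve event-report reads.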
From 7ec1f096d3e66c7cd857e75bef229688c73a9868 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Thu, 2 Mar 2023 12:14:44 +0100 Subject: [PATCH 261/344] Add Sytest jobs with the asyncio reactor enabled (#14101) --- .ci/scripts/calculate_jobs.py | 15 +++++++++++++++ .github/workflows/tests.yml | 1 + changelog.d/14101.misc | 1 + 3 files changed, 17 insertions(+) create mode 100644 changelog.d/14101.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 0cdc20e19c31..b41ec0b6e2dc 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -109,11 +109,26 @@ def set_output(key: str, value: str): "postgres": "multi-postgres", "workers": "workers", }, + { + "sytest-tag": "focal", + "postgres": "multi-postgres", + "workers": "workers", + "reactor": "asyncio", + }, ] if not IS_PR: sytest_tests.extend( [ + { + "sytest-tag": "focal", + "reactor": "asyncio", + }, + { + "sytest-tag": "focal", + "postgres": "postgres", + "reactor": "asyncio", + }, { "sytest-tag": "testing", "postgres": "postgres", diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index cfafeaadc93b..48a33c2f49fa 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -368,6 +368,7 @@ jobs: SYTEST_BRANCH: ${{ github.head_ref }} POSTGRES: ${{ matrix.job.postgres && 1}} MULTI_POSTGRES: ${{ (matrix.job.postgres == 'multi-postgres') && 1}} + ASYNCIO_REACTOR: ${{ (matrix.job.reactor == 'asyncio') && 1 }} WORKERS: ${{ matrix.job.workers && 1 }} BLACKLIST: ${{ matrix.job.workers && 'synapse-blacklist-with-workers' }} TOP: ${{ github.workspace }} diff --git a/changelog.d/14101.misc b/changelog.d/14101.misc new file mode 100644 index 000000000000..c48f40cd3876 --- /dev/null +++ b/changelog.d/14101.misc @@ -0,0 +1 @@ +Run the integration test suites with the asyncio reactor enabled in CI. From 33a85cf08ccf3713599a168ae1ed10d35ada2009 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 2 Mar 2023 07:24:29 -0500 Subject: [PATCH 262/344] Fix conflicting URLs for dehydrated devices. (#15180) --- changelog.d/15180.bugfix | 1 + synapse/rest/client/devices.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15180.bugfix diff --git a/changelog.d/15180.bugfix b/changelog.d/15180.bugfix new file mode 100644 index 000000000000..e7a3dcd41a37 --- /dev/null +++ b/changelog.d/15180.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 486c6dbbc5b3..dab4a77f7e7c 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -255,7 +255,7 @@ class DehydratedDeviceServlet(RestServlet): """ - PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=()) + PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device$", releases=()) def __init__(self, hs: "HomeServer"): super().__init__() From 8ef324ea6f1390876940989eacc8734fe0d15582 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 2 Mar 2023 08:30:51 -0500 Subject: [PATCH 263/344] Update intentional mentions (MSC3952) to depend on `exact_event_property_contains` (MSC3966). (#15051) This replaces the specific `is_user_mention` push rule condition used in MSC3952 with the generic `exact_event_property_contains` push rule condition from MSC3966. 
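For illustration, the serialised condition on the `.org.matrix.msc3952.is_user_mention` base rule changes roughly from:

    {"kind": "org.matrix.msc3952.is_user_mention"}

to something like the following, where `value` is filled in with each user's Matrix ID when the rules are formatted for that user (the user ID shown here is only an example):

    {
        "kind": "org.matrix.msc3966.exact_event_property_contains",
        "key": "content.org.matrix.msc3952.mentions.user_ids",
        "value": "@alice:example.org",
    }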
--- changelog.d/15051.misc | 1 + rust/benches/evaluator.rs | 4 -- rust/src/push/base_rules.rs | 9 +++- rust/src/push/evaluator.rs | 50 ++++++++++++--------- rust/src/push/mod.rs | 28 ++++++------ stubs/synapse/synapse_rust/push.pyi | 3 +- synapse/config/experimental.py | 8 +++- synapse/push/bulk_push_rule_evaluator.py | 18 ++------ synapse/push/clientformat.py | 11 ++--- tests/push/test_bulk_push_rule_evaluator.py | 2 + tests/push/test_push_rule_evaluator.py | 33 +------------- 11 files changed, 73 insertions(+), 94 deletions(-) create mode 100644 changelog.d/15051.misc diff --git a/changelog.d/15051.misc b/changelog.d/15051.misc new file mode 100644 index 000000000000..fabfe77d35af --- /dev/null +++ b/changelog.d/15051.misc @@ -0,0 +1 @@ +Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 9a871f569334..7c987d4948e0 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -44,7 +44,6 @@ fn bench_match_exact(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), 10, Some(0), Default::default(), @@ -92,7 +91,6 @@ fn bench_match_word(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), 10, Some(0), Default::default(), @@ -140,7 +138,6 @@ fn bench_match_word_miss(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), 10, Some(0), Default::default(), @@ -188,7 +185,6 @@ fn bench_eval_message(b: &mut Bencher) { let eval = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), 10, Some(0), Default::default(), diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 62de51d91528..3d72a4a4c36e 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -21,13 +21,13 @@ use lazy_static::lazy_static; use serde_json::Value; use super::KnownCondition; -use crate::push::PushRule; use crate::push::RelatedEventMatchTypeCondition; use crate::push::SetTweak; use crate::push::TweakValue; use crate::push::{Action, ExactEventMatchCondition, SimpleJsonValue}; use crate::push::{Condition, EventMatchTypeCondition}; use crate::push::{EventMatchCondition, EventMatchPatternType}; +use crate::push::{ExactEventMatchTypeCondition, PushRule}; const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak { set_tweak: Cow::Borrowed("highlight"), @@ -144,7 +144,12 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ PushRule { rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"), priority_class: 5, - conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::IsUserMention)]), + conditions: Cow::Borrowed(&[Condition::Known( + KnownCondition::ExactEventPropertyContainsType(ExactEventMatchTypeCondition { + key: Cow::Borrowed("content.org.matrix.msc3952.mentions.user_ids"), + value_type: Cow::Borrowed(&EventMatchPatternType::UserId), + }), + )]), actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), default: true, default_enabled: true, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index a65c645cafe5..55846627cca3 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -13,7 +13,7 @@ // limitations under the License. 
use std::borrow::Cow; -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::BTreeMap; use crate::push::{EventMatchPatternType, JsonValue}; use anyhow::{Context, Error}; @@ -72,8 +72,6 @@ pub struct PushRuleEvaluator { /// True if the event has a mentions property and MSC3952 support is enabled. has_mentions: bool, - /// The user mentions that were part of the message. - user_mentions: BTreeSet, /// The number of users in the room. room_member_count: u64, @@ -114,7 +112,6 @@ impl PushRuleEvaluator { pub fn py_new( flattened_keys: BTreeMap, has_mentions: bool, - user_mentions: BTreeSet, room_member_count: u64, sender_power_level: Option, notification_power_levels: BTreeMap, @@ -134,7 +131,6 @@ impl PushRuleEvaluator { flattened_keys, body, has_mentions, - user_mentions, room_member_count, notification_power_levels, sender_power_level, @@ -310,15 +306,30 @@ impl PushRuleEvaluator { Some(Cow::Borrowed(pattern)), )? } - KnownCondition::ExactEventPropertyContains(exact_event_match) => { - self.match_exact_event_property_contains(exact_event_match)? - } - KnownCondition::IsUserMention => { - if let Some(uid) = user_id { - self.user_mentions.contains(uid) + KnownCondition::ExactEventPropertyContains(exact_event_match) => self + .match_exact_event_property_contains( + exact_event_match.key.clone(), + exact_event_match.value.clone(), + )?, + KnownCondition::ExactEventPropertyContainsType(exact_event_match) => { + // The `pattern_type` can either be "user_id" or "user_localpart", + // either way if we don't have a `user_id` then the condition can't + // match. + let user_id = if let Some(user_id) = user_id { + user_id } else { - false - } + return Ok(false); + }; + + let pattern = match &*exact_event_match.value_type { + EventMatchPatternType::UserId => user_id, + EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, + }; + + self.match_exact_event_property_contains( + exact_event_match.key.clone(), + Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())), + )? } KnownCondition::ContainsDisplayName => { if let Some(dn) = display_name { @@ -456,24 +467,21 @@ impl PushRuleEvaluator { /// Evaluates a `exact_event_property_contains` condition. (MSC3758) fn match_exact_event_property_contains( &self, - exact_event_match: &ExactEventMatchCondition, + key: Cow, + value: Cow, ) -> Result { // First check if the feature is enabled. 
if !self.msc3966_exact_event_property_contains { return Ok(false); } - let value = &exact_event_match.value; - - let haystack = if let Some(JsonValue::Array(haystack)) = - self.flattened_keys.get(&*exact_event_match.key) - { + let haystack = if let Some(JsonValue::Array(haystack)) = self.flattened_keys.get(&*key) { haystack } else { return Ok(false); }; - Ok(haystack.contains(&**value)) + Ok(haystack.contains(&value)) } /// Match the member count against an 'is' condition @@ -510,7 +518,6 @@ fn push_rule_evaluator() { let evaluator = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), 10, Some(0), BTreeMap::new(), @@ -542,7 +549,6 @@ fn test_requires_room_version_supports_condition() { let evaluator = PushRuleEvaluator::py_new( flattened_keys, false, - BTreeSet::new(), 10, Some(0), BTreeMap::new(), diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 97feb6efc924..6391d2ed47de 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -340,8 +340,12 @@ pub enum KnownCondition { RelatedEventMatchType(RelatedEventMatchTypeCondition), #[serde(rename = "org.matrix.msc3966.exact_event_property_contains")] ExactEventPropertyContains(ExactEventMatchCondition), - #[serde(rename = "org.matrix.msc3952.is_user_mention")] - IsUserMention, + // Identical to exact_event_property_contains but gives predefined patterns. Cannot be added by users. + #[serde( + skip_deserializing, + rename = "org.matrix.msc3966.exact_event_property_contains" + )] + ExactEventPropertyContainsType(ExactEventMatchTypeCondition), ContainsDisplayName, RoomMemberCount { #[serde(skip_serializing_if = "Option::is_none")] @@ -398,6 +402,15 @@ pub struct ExactEventMatchCondition { pub value: Cow<'static, SimpleJsonValue>, } +/// The body of a [`Condition::ExactEventMatch`] that uses user_id or user_localpart as a pattern. +#[derive(Serialize, Debug, Clone)] +pub struct ExactEventMatchTypeCondition { + pub key: Cow<'static, str>, + // During serialization, the pattern_type property gets replaced with a + // pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user. + pub value_type: Cow<'static, EventMatchPatternType>, +} + /// The body of a [`Condition::RelatedEventMatch`] #[derive(Serialize, Deserialize, Debug, Clone)] pub struct RelatedEventMatchCondition { @@ -739,17 +752,6 @@ fn test_deserialize_unstable_msc3758_condition() { )); } -#[test] -fn test_deserialize_unstable_msc3952_user_condition() { - let json = r#"{"kind":"org.matrix.msc3952.is_user_mention"}"#; - - let condition: Condition = serde_json::from_str(json).unwrap(); - assert!(matches!( - condition, - Condition::Known(KnownCondition::IsUserMention) - )); -} - #[test] fn test_deserialize_custom_condition() { let json = r#"{"kind":"custom_tag"}"#; diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index a8f0ed2435c5..c17796ffbd6b 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Set, Tuple, Union +from typing import Any, Collection, Dict, Mapping, Optional, Sequence, Tuple, Union from synapse.types import JsonDict, JsonValue @@ -58,7 +58,6 @@ class PushRuleEvaluator: self, flattened_keys: Mapping[str, JsonValue], has_mentions: bool, - user_mentions: Set[str], room_member_count: int, sender_power_level: Optional[int], notification_power_levels: Mapping[str, int], diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 7c81f055b60f..fc64f2bda11d 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -179,10 +179,16 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "msc3873_escape_event_match_key", False ) - # MSC3952: Intentional mentions, this depends on MSC3758. + # MSC3966: exact_event_property_contains push rule condition. + self.msc3966_exact_event_property_contains = experimental.get( + "msc3966_exact_event_property_contains", False + ) + + # MSC3952: Intentional mentions, this depends on MSC3758 and MSC3966. self.msc3952_intentional_mentions = ( experimental.get("msc3952_intentional_mentions", False) and self.msc3758_exact_event_match + and self.msc3966_exact_event_property_contains ) # MSC3959: Do not generate notifications for edits. diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 3c4a152d6b72..abcf687f0528 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -23,7 +23,6 @@ Mapping, Optional, Sequence, - Set, Tuple, Union, ) @@ -396,18 +395,10 @@ async def _action_for_event_by_user( del notification_levels[key] # Pull out any user and room mentions. - mentions = event.content.get(EventContentFields.MSC3952_MENTIONS) - has_mentions = self._intentional_mentions_enabled and isinstance(mentions, dict) - user_mentions: Set[str] = set() - if has_mentions: - # mypy seems to have lost the type even though it must be a dict here. - assert isinstance(mentions, dict) - # Remove out any non-string items and convert to a set. 
- user_mentions_raw = mentions.get("user_ids") - if isinstance(user_mentions_raw, list): - user_mentions = set( - filter(lambda item: isinstance(item, str), user_mentions_raw) - ) + has_mentions = ( + self._intentional_mentions_enabled + and EventContentFields.MSC3952_MENTIONS in event.content + ) evaluator = PushRuleEvaluator( _flatten_dict( @@ -415,7 +406,6 @@ async def _action_for_event_by_user( msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key, ), has_mentions, - user_mentions, room_member_count, sender_power_level, notification_levels, diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index bb76c169c6ce..222afbdcc872 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -41,11 +41,12 @@ def format_push_rules_for_user( rulearray.append(template_rule) - pattern_type = template_rule.pop("pattern_type", None) - if pattern_type == "user_id": - template_rule["pattern"] = user.to_string() - elif pattern_type == "user_localpart": - template_rule["pattern"] = user.localpart + for type_key in ("pattern", "value"): + type_value = template_rule.pop(f"{type_key}_type", None) + if type_value == "user_id": + template_rule[type_key] = user.to_string() + elif type_value == "user_localpart": + template_rule[type_key] = user.localpart template_rule["enabled"] = enabled diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 1458076a90ba..73fecfd4adf4 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -233,6 +233,7 @@ def _create_and_process( "experimental_features": { "msc3758_exact_event_match": True, "msc3952_intentional_mentions": True, + "msc3966_exact_event_property_contains": True, } } ) @@ -336,6 +337,7 @@ def test_user_mentions(self) -> None: "experimental_features": { "msc3758_exact_event_match": True, "msc3952_intentional_mentions": True, + "msc3966_exact_event_property_contains": True, } } ) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 1d30e3c3e4cf..d4a4bc4d93cb 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Dict, List, Optional, Set, Union, cast +from typing import Any, Dict, List, Optional, Union, cast import frozendict @@ -147,8 +147,6 @@ def _get_evaluator( self, content: JsonMapping, *, - has_mentions: bool = False, - user_mentions: Optional[Set[str]] = None, related_events: Optional[JsonDict] = None, ) -> PushRuleEvaluator: event = FrozenEvent( @@ -167,8 +165,7 @@ def _get_evaluator( power_levels: Dict[str, Union[int, Dict[str, int]]] = {} return PushRuleEvaluator( _flatten_dict(event), - has_mentions, - user_mentions or set(), + False, room_member_count, sender_power_level, cast(Dict[str, int], power_levels.get("notifications", {})), @@ -204,32 +201,6 @@ def test_display_name(self) -> None: # A display name with spaces should work fine. self.assertTrue(evaluator.matches(condition, "@user:test", "foo bar")) - def test_user_mentions(self) -> None: - """Check for user mentions.""" - condition = {"kind": "org.matrix.msc3952.is_user_mention"} - - # No mentions shouldn't match. 
- evaluator = self._get_evaluator({}, has_mentions=True) - self.assertFalse(evaluator.matches(condition, "@user:test", None)) - - # An empty set shouldn't match - evaluator = self._get_evaluator({}, has_mentions=True, user_mentions=set()) - self.assertFalse(evaluator.matches(condition, "@user:test", None)) - - # The Matrix ID appearing anywhere in the mentions list should match - evaluator = self._get_evaluator( - {}, has_mentions=True, user_mentions={"@user:test"} - ) - self.assertTrue(evaluator.matches(condition, "@user:test", None)) - - evaluator = self._get_evaluator( - {}, has_mentions=True, user_mentions={"@another:test", "@user:test"} - ) - self.assertTrue(evaluator.matches(condition, "@user:test", None)) - - # Note that invalid data is tested at tests.push.test_bulk_push_rule_evaluator.TestBulkPushRuleEvaluator.test_mentions - # since the BulkPushRuleEvaluator is what handles data sanitisation. - def _assert_matches( self, condition: JsonDict, content: JsonMapping, msg: Optional[str] = None ) -> None: From c4f4dc35cd29834870693635a791ad932caf074e Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 2 Mar 2023 15:55:26 +0000 Subject: [PATCH 264/344] Dockerfile-workers: spell out when config isn't generated (#15186) * Complement: Spell out when config isn't generated * Changelog --- changelog.d/15186.docker | 1 + docker/configure_workers_and_start.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15186.docker diff --git a/changelog.d/15186.docker b/changelog.d/15186.docker new file mode 100644 index 000000000000..5e436ff7e2a9 --- /dev/null +++ b/changelog.d/15186.docker @@ -0,0 +1 @@ +Improve startup logging in the with-workers Docker image. diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 7f615e50663f..81368069ec45 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -675,17 +675,21 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None: if not os.path.exists(config_path): log("Generating base homeserver config") generate_base_homeserver_config() - + else: + log("Base homeserver config exists—not regenerating") # This script may be run multiple times (mostly by Complement, see note at top of file). # Don't re-configure workers in this instance. mark_filepath = "/conf/workers_have_been_configured" if not os.path.exists(mark_filepath): # Always regenerate all other config files + log("Generating worker config files") generate_worker_files(environ, config_path, data_dir) # Mark workers as being configured with open(mark_filepath, "w") as f: f.write("") + else: + log("Worker config exists—not regenerating") # Lifted right out of start.py jemallocpath = "/usr/lib/%s-linux-gnu/libjemalloc.so.2" % (platform.machine(),) From c8665dd25d18fa7d7176984cee191834002909a0 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Thu, 2 Mar 2023 18:16:54 +0100 Subject: [PATCH 265/344] Remove the unspecced and bugged PUT /knock/{roomIdOrAlias} endpoint (#15189) --- changelog.d/15189.misc | 1 + synapse/rest/client/knock.py | 16 +--------------- 2 files changed, 2 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15189.misc diff --git a/changelog.d/15189.misc b/changelog.d/15189.misc new file mode 100644 index 000000000000..ded2feb79e4a --- /dev/null +++ b/changelog.d/15189.misc @@ -0,0 +1 @@ +Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. 
diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index ad025c8a4529..10975224c081 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Awaitable, Dict, List, Optional, Tuple +from typing import TYPE_CHECKING, Dict, List, Tuple from synapse.api.constants import Membership from synapse.api.errors import SynapseError @@ -24,8 +24,6 @@ parse_strings_from_args, ) from synapse.http.site import SynapseRequest -from synapse.logging.opentracing import set_tag -from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict, RoomAlias, RoomID if TYPE_CHECKING: @@ -45,7 +43,6 @@ class KnockRoomAliasServlet(RestServlet): def __init__(self, hs: "HomeServer"): super().__init__() - self.txns = HttpTransactionCache(hs) self.room_member_handler = hs.get_room_member_handler() self.auth = hs.get_auth() @@ -53,7 +50,6 @@ async def on_POST( self, request: SynapseRequest, room_identifier: str, - txn_id: Optional[str] = None, ) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) @@ -86,7 +82,6 @@ async def on_POST( target=requester.user, room_id=room_id, action=Membership.KNOCK, - txn_id=txn_id, third_party_signed=None, remote_room_hosts=remote_room_hosts, content=event_content, @@ -94,15 +89,6 @@ async def on_POST( return 200, {"room_id": room_id} - def on_PUT( - self, request: SynapseRequest, room_identifier: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: - set_tag("txn_id", txn_id) - - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_identifier, txn_id - ) - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: KnockRoomAliasServlet(hs).register(http_server) From ecbe0ddbe7c47e05bc27b39dc10a9c30eafd2960 Mon Sep 17 00:00:00 2001 From: Dirk Klimpel <5740567+dklimpel@users.noreply.github.com> Date: Thu, 2 Mar 2023 18:59:53 +0100 Subject: [PATCH 266/344] Add support for knocking to workers. (#15133) --- changelog.d/15133.feature | 1 + docker/configure_workers_and_start.py | 1 + docs/workers.md | 1 + synapse/handlers/room_member.py | 4 +++- synapse/handlers/room_member_worker.py | 4 +++- synapse/replication/http/membership.py | 15 ++++----------- synapse/rest/__init__.py | 2 +- synapse/rest/client/knock.py | 1 - synapse/rest/client/room.py | 2 +- 9 files changed, 15 insertions(+), 16 deletions(-) create mode 100644 changelog.d/15133.feature diff --git a/changelog.d/15133.feature b/changelog.d/15133.feature new file mode 100644 index 000000000000..e0af0d455440 --- /dev/null +++ b/changelog.d/15133.feature @@ -0,0 +1 @@ +Add support for knocking to workers. 
\ No newline at end of file diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 81368069ec45..add8bb1ff61e 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -205,6 +205,7 @@ "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send", "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$", "^/_matrix/client/(api/v1|r0|v3|unstable)/join/", + "^/_matrix/client/(api/v1|r0|v3|unstable)/knock/", "^/_matrix/client/(api/v1|r0|v3|unstable)/profile/", "^/_matrix/client/(v1|unstable/org.matrix.msc2716)/rooms/.*/batch_send", ], diff --git a/docs/workers.md b/docs/workers.md index 35a96f12a91b..fa536cd31081 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -252,6 +252,7 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/ ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$ ^/_matrix/client/(api/v1|r0|v3|unstable)/join/ + ^/_matrix/client/(api/v1|r0|v3|unstable)/knock/ ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/ # Account data requests diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index de7476f3005d..509c5578895b 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -207,6 +207,7 @@ async def _remote_join( @abc.abstractmethod async def remote_knock( self, + requester: Requester, remote_room_hosts: List[str], room_id: str, user: UserID, @@ -1073,7 +1074,7 @@ async def update_membership_locked( ) return await self.remote_knock( - remote_room_hosts, room_id, target, content + requester, remote_room_hosts, room_id, target, content ) return await self._local_membership_update( @@ -1984,6 +1985,7 @@ async def _generate_local_out_of_band_leave( async def remote_knock( self, + requester: Requester, remote_room_hosts: List[str], room_id: str, user: UserID, diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index ba261702d4bc..76e36b8a6d53 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -113,6 +113,7 @@ async def remote_rescind_knock( async def remote_knock( self, + requester: Requester, remote_room_hosts: List[str], room_id: str, user: UserID, @@ -123,9 +124,10 @@ async def remote_knock( Implements RoomMemberHandler.remote_knock """ ret = await self._remote_knock_client( + requester=requester, remote_room_hosts=remote_room_hosts, room_id=room_id, - user=user, + user_id=user.to_string(), content=content, ) return ret["event_id"], ret["stream_id"] diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index 9fa1060d48f6..67b01db67e7f 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -142,17 +142,12 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, - request: SynapseRequest, - content: JsonDict, - room_id: str, - user_id: str, + self, request: SynapseRequest, content: JsonDict, room_id: str, user_id: str ) -> Tuple[int, JsonDict]: remote_room_hosts = content["remote_room_hosts"] event_content = content["content"] requester = Requester.deserialize(self.store, content["requester"]) - request.requester = requester logger.debug("remote_knock: %s on room: %s", user_id, room_id) @@ -277,16 +272,12 @@ async def _serialize_payload( # type: ignore[override] } async def _handle_request( # type: ignore[override] - self, - request: 
SynapseRequest, - content: JsonDict, - knock_event_id: str, + self, request: SynapseRequest, content: JsonDict, knock_event_id: str ) -> Tuple[int, JsonDict]: txn_id = content["txn_id"] event_content = content["content"] requester = Requester.deserialize(self.store, content["requester"]) - request.requester = requester # hopefully we're now on the master, so this won't recurse! @@ -363,3 +354,5 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReplicationRemoteJoinRestServlet(hs).register(http_server) ReplicationRemoteRejectInviteRestServlet(hs).register(http_server) ReplicationUserJoinedLeftRoomRestServlet(hs).register(http_server) + ReplicationRemoteKnockRestServlet(hs).register(http_server) + ReplicationRemoteRescindKnockRestServlet(hs).register(http_server) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index c327f1504369..2e19e055d3aa 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -139,7 +139,7 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: relations.register_servlets(hs, client_resource) if is_main_process: password_policy.register_servlets(hs, client_resource) - knock.register_servlets(hs, client_resource) + knock.register_servlets(hs, client_resource) # moving to /_synapse/admin if is_main_process: diff --git a/synapse/rest/client/knock.py b/synapse/rest/client/knock.py index 10975224c081..4fa66904baa1 100644 --- a/synapse/rest/client/knock.py +++ b/synapse/rest/client/knock.py @@ -63,7 +63,6 @@ async def on_POST( # twisted.web.server.Request.args is incorrectly defined as Optional[Any] args: Dict[bytes, List[bytes]] = request.args # type: ignore - remote_room_hosts = parse_strings_from_args( args, "server_name", required=False ) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 14b04810a191..45aee3d3fe39 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -926,7 +926,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() def register(self, http_server: HttpServer) -> None: - # /rooms/$roomid/[invite|join|leave] + # /rooms/$roomid/[join|invite|leave|ban|unban|kick] PATTERNS = ( "/rooms/(?P[^/]*)/" "(?Pjoin|invite|leave|ban|unban|kick)" From 1eea662780a6325af0a61ceb447b4c91a2d3ac98 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 2 Mar 2023 18:27:00 +0000 Subject: [PATCH 267/344] Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator` (#15191 --- changelog.d/15191.misc | 1 + .../storage/databases/main/account_data.py | 11 +---- synapse/storage/util/id_generators.py | 45 ++++++++++++++++++- synapse/storage/util/sequence.py | 2 +- 4 files changed, 48 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15191.misc diff --git a/changelog.d/15191.misc b/changelog.d/15191.misc new file mode 100644 index 000000000000..579f76d451ff --- /dev/null +++ b/changelog.d/15191.misc @@ -0,0 +1 @@ +Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. 
\ No newline at end of file diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 308d19440f63..2d2ba74347e1 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -40,7 +40,6 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, MultiWriterIdGenerator, StreamIdGenerator, ) @@ -64,14 +63,12 @@ def __init__( ): super().__init__(database, db_conn, hs) - # `_can_write_to_account_data` indicates whether the current worker is allowed - # to write account data. A value of `True` implies that `_account_data_id_gen` - # is an `AbstractStreamIdGenerator` and not just a tracker. - self._account_data_id_gen: AbstractStreamIdTracker self._can_write_to_account_data = ( self._instance_name in hs.config.worker.writers.account_data ) + self._account_data_id_gen: AbstractStreamIdGenerator + if isinstance(database.engine, PostgresEngine): self._account_data_id_gen = MultiWriterIdGenerator( db_conn=db_conn, @@ -558,7 +555,6 @@ async def add_account_data_to_room( The maximum stream ID. """ assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) content_json = json_encoder.encode(content) @@ -598,7 +594,6 @@ async def remove_account_data_for_room( data to delete. """ assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) def _remove_account_data_for_room_txn( txn: LoggingTransaction, next_id: int @@ -663,7 +658,6 @@ async def add_account_data_for_user( The maximum stream ID. """ assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) async with self._account_data_id_gen.get_next() as next_id: await self.db_pool.runInteraction( @@ -770,7 +764,6 @@ async def remove_account_data_for_user( to delete. """ assert self._can_write_to_account_data - assert isinstance(self._account_data_id_gen, AbstractStreamIdGenerator) def _remove_account_data_for_user_txn( txn: LoggingTransaction, next_id: int diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 9adff3f4f523..334d3d718b4b 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -158,6 +158,15 @@ def get_next_mult(self, n: int) -> AsyncContextManager[Sequence[int]]: """ raise NotImplementedError() + @abc.abstractmethod + def get_next_txn(self, txn: LoggingTransaction) -> int: + """ + Usage: + stream_id_gen.get_next_txn(txn) + # ... persist events ... + """ + raise NotImplementedError() + class StreamIdGenerator(AbstractStreamIdGenerator): """Generates and tracks stream IDs for a stream with a single writer. @@ -263,6 +272,40 @@ def manager() -> Generator[Sequence[int], None, None]: return _AsyncCtxManagerWrapper(manager()) + def get_next_txn(self, txn: LoggingTransaction) -> int: + """ + Retrieve the next stream ID from within a database transaction. + + Clean-up functions will be called when the transaction finishes. + + Args: + txn: The database transaction object. + + Returns: + The next stream ID. + """ + if not self._is_writer: + raise Exception("Tried to allocate stream ID on non-writer") + + # Get the next stream ID. 
+ with self._lock: + self._current += self._step + next_id = self._current + + self._unfinished_ids[next_id] = next_id + + def clear_unfinished_id(id_to_clear: int) -> None: + """A function to mark processing this ID as finished""" + with self._lock: + self._unfinished_ids.pop(id_to_clear) + + # Mark this ID as finished once the database transaction itself finishes. + txn.call_after(clear_unfinished_id, next_id) + txn.call_on_exception(clear_unfinished_id, next_id) + + # Return the new ID. + return next_id + def get_current_token(self) -> int: if not self._is_writer: return self._current @@ -568,7 +611,7 @@ def get_next_txn(self, txn: LoggingTransaction) -> int: """ Usage: - stream_id = stream_id_gen.get_next(txn) + stream_id = stream_id_gen.get_next_txn(txn) # ... persist event ... """ diff --git a/synapse/storage/util/sequence.py b/synapse/storage/util/sequence.py index 75268cbe1595..80915216de94 100644 --- a/synapse/storage/util/sequence.py +++ b/synapse/storage/util/sequence.py @@ -205,7 +205,7 @@ def __init__(self, get_first_callback: GetFirstCallbackType): """ Args: get_first_callback: a callback which is called on the first call to - get_next_id_txn; should return the curreent maximum id + get_next_id_txn; should return the current maximum id """ # the callback. this is cleared after it is called, so that it can be GCed. self._callback: Optional[GetFirstCallbackType] = get_first_callback From 15e975f68fc354843a0647e53f285696e86de89b Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 3 Mar 2023 10:51:57 +0000 Subject: [PATCH 268/344] Experimental MSC3890 Implementation: Fix deleting account data when using an account data writer worker (#14869) --- changelog.d/14869.bugfix | 1 + synapse/handlers/account_data.py | 7 ---- .../storage/databases/main/account_data.py | 34 ++++++++----------- 3 files changed, 16 insertions(+), 26 deletions(-) create mode 100644 changelog.d/14869.bugfix diff --git a/changelog.d/14869.bugfix b/changelog.d/14869.bugfix new file mode 100644 index 000000000000..865b597741bb --- /dev/null +++ b/changelog.d/14869.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in v1.75.0rc1 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. \ No newline at end of file diff --git a/synapse/handlers/account_data.py b/synapse/handlers/account_data.py index 797de46dbc3b..7e01c18c6c90 100644 --- a/synapse/handlers/account_data.py +++ b/synapse/handlers/account_data.py @@ -155,9 +155,6 @@ async def remove_account_data_for_room( max_stream_id = await self._store.remove_account_data_for_room( user_id, room_id, account_data_type ) - if max_stream_id is None: - # The referenced account data did not exist, so no delete occurred. - return None self._notifier.on_new_event( StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id] @@ -230,9 +227,6 @@ async def remove_account_data_for_user( max_stream_id = await self._store.remove_account_data_for_user( user_id, account_data_type ) - if max_stream_id is None: - # The referenced account data did not exist, so no delete occurred. 
- return None self._notifier.on_new_event( StreamKeyType.ACCOUNT_DATA, max_stream_id, users=[user_id] @@ -248,7 +242,6 @@ async def remove_account_data_for_user( instance_name=random.choice(self._account_data_writers), user_id=user_id, account_data_type=account_data_type, - content={}, ) return response["max_stream_id"] diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 2d2ba74347e1..a9843f6e174a 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -581,7 +581,7 @@ async def add_account_data_to_room( async def remove_account_data_for_room( self, user_id: str, room_id: str, account_data_type: str - ) -> Optional[int]: + ) -> int: """Delete the room account data for the user of a given type. Args: @@ -632,15 +632,13 @@ def _remove_account_data_for_room_txn( next_id, ) - if not row_updated: - return None - - self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_room_account_data_for_user.invalidate((user_id,)) - self.get_account_data_for_room.invalidate((user_id, room_id)) - self.get_account_data_for_room_and_type.prefill( - (user_id, room_id, account_data_type), {} - ) + if row_updated: + self._account_data_stream_cache.entity_has_changed(user_id, next_id) + self.get_room_account_data_for_user.invalidate((user_id,)) + self.get_account_data_for_room.invalidate((user_id, room_id)) + self.get_account_data_for_room_and_type.prefill( + (user_id, room_id, account_data_type), {} + ) return self._account_data_id_gen.get_current_token() @@ -747,7 +745,7 @@ async def remove_account_data_for_user( self, user_id: str, account_data_type: str, - ) -> Optional[int]: + ) -> int: """ Delete a single piece of user account data by type. @@ -833,14 +831,12 @@ def _remove_account_data_for_user_txn( next_id, ) - if not row_updated: - return None - - self._account_data_stream_cache.entity_has_changed(user_id, next_id) - self.get_global_account_data_for_user.invalidate((user_id,)) - self.get_global_account_data_by_type_for_user.prefill( - (user_id, account_data_type), {} - ) + if row_updated: + self._account_data_stream_cache.entity_has_changed(user_id, next_id) + self.get_global_account_data_for_user.invalidate((user_id,)) + self.get_global_account_data_by_type_for_user.prefill( + (user_id, account_data_type), {} + ) return self._account_data_id_gen.get_current_token() From 7ae4f7236a0873e490c7f5dc5d69f3b922818cb4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Mar 2023 07:13:03 -0500 Subject: [PATCH 269/344] Configure ruff to automatically fix issues. (#15194) --- changelog.d/15194.misc | 1 + scripts-dev/lint.sh | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15194.misc diff --git a/changelog.d/15194.misc b/changelog.d/15194.misc new file mode 100644 index 000000000000..931bf5448a9b --- /dev/null +++ b/changelog.d/15194.misc @@ -0,0 +1 @@ +Automatically fix errors with `ruff`. diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 392c509a8ac1..9e4ed3246e7a 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -112,7 +112,7 @@ python3 -m black "${files[@]}" # Catch any common programming mistakes in Python code. # --quiet suppresses the update check. -ruff --quiet "${files[@]}" +ruff --quiet --fix "${files[@]}" # Catch any common programming mistakes in Rust code. 
# From 848f7e3d5ff38ca28e56e6143e584974da1eec42 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Fri, 3 Mar 2023 13:22:49 +0100 Subject: [PATCH 270/344] Remove unspecced and buggy `PUT` method on the unstable `/rooms//batch_send` endpoint. (#15199) --- changelog.d/15199.misc | 1 + synapse/rest/client/room_batch.py | 16 +--------------- 2 files changed, 2 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15199.misc diff --git a/changelog.d/15199.misc b/changelog.d/15199.misc new file mode 100644 index 000000000000..145b03fe1651 --- /dev/null +++ b/changelog.d/15199.misc @@ -0,0 +1 @@ +Remove unspecced and buggy `PUT` method on the unstable `/rooms//batch_send` endpoint. diff --git a/synapse/rest/client/room_batch.py b/synapse/rest/client/room_batch.py index 10be4a781b1d..ef284ecc1147 100644 --- a/synapse/rest/client/room_batch.py +++ b/synapse/rest/client/room_batch.py @@ -15,9 +15,7 @@ import logging import re from http import HTTPStatus -from typing import TYPE_CHECKING, Awaitable, Tuple - -from twisted.web.server import Request +from typing import TYPE_CHECKING, Tuple from synapse.api.constants import EventContentFields from synapse.api.errors import AuthError, Codes, SynapseError @@ -30,7 +28,6 @@ parse_strings_from_args, ) from synapse.http.site import SynapseRequest -from synapse.rest.client.transactions import HttpTransactionCache from synapse.types import JsonDict if TYPE_CHECKING: @@ -79,7 +76,6 @@ def __init__(self, hs: "HomeServer"): self.event_creation_handler = hs.get_event_creation_handler() self.auth = hs.get_auth() self.room_batch_handler = hs.get_room_batch_handler() - self.txns = HttpTransactionCache(hs) async def on_POST( self, request: SynapseRequest, room_id: str @@ -249,16 +245,6 @@ async def on_POST( return HTTPStatus.OK, response_dict - def on_GET(self, request: Request, room_id: str) -> Tuple[int, str]: - return HTTPStatus.NOT_IMPLEMENTED, "Not implemented" - - def on_PUT( - self, request: SynapseRequest, room_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id - ) - def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: msc2716_enabled = hs.config.experimental.msc2716_enabled From 02f74f3a997a4356b5bda957ebc51a829dad15f9 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Fri, 3 Mar 2023 08:13:37 -0500 Subject: [PATCH 271/344] Combine AbstractStreamIdTracker and AbstractStreamIdGenerator. (#15192) AbstractStreamIdTracker (now) has only a single sub-class: AbstractStreamIdGenerator, combine them to simplify some code and remove any direct references to AbstractStreamIdTracker. --- changelog.d/15192.misc | 1 + synapse/storage/databases/main/devices.py | 7 ++----- synapse/storage/databases/main/events_worker.py | 5 ++--- synapse/storage/databases/main/push_rule.py | 3 +-- synapse/storage/databases/main/pusher.py | 3 +-- synapse/storage/databases/main/receipts.py | 6 +++--- synapse/storage/util/id_generators.py | 17 +++++------------ 7 files changed, 15 insertions(+), 27 deletions(-) create mode 100644 changelog.d/15192.misc diff --git a/changelog.d/15192.misc b/changelog.d/15192.misc new file mode 100644 index 000000000000..107668687583 --- /dev/null +++ b/changelog.d/15192.misc @@ -0,0 +1 @@ +Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. 
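Roughly, the combined interface now looks like the following sketch (signatures abridged; see the diff below for the full docstrings):

    class AbstractStreamIdGenerator(metaclass=abc.ABCMeta):
        # Tracking half (formerly AbstractStreamIdTracker):
        @abc.abstractmethod
        def advance(self, instance_name: str, new_id: int) -> None: ...
        @abc.abstractmethod
        def get_current_token(self) -> int: ...
        @abc.abstractmethod
        def get_current_token_for_writer(self, instance_name: str) -> int: ...

        # Generation half (formerly only on AbstractStreamIdGenerator):
        @abc.abstractmethod
        def get_next(self) -> AsyncContextManager[int]: ...
        @abc.abstractmethod
        def get_next_mult(self, n: int) -> AsyncContextManager[Sequence[int]]: ...
        @abc.abstractmethod
        def get_next_txn(self, txn: LoggingTransaction) -> int: ...

Call sites that only needed the tracking methods keep working, since every generator also tracks; the store-level type annotations are simply tightened to the single class.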
diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index 0dd15f16ff7b..5503621ad613 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -52,7 +52,6 @@ from synapse.storage.types import Cursor from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, StreamIdGenerator, ) from synapse.types import JsonDict, StrCollection, get_verify_key_from_cross_signing_key @@ -91,7 +90,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. - self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + self._device_list_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), "device_lists_stream", @@ -712,9 +711,7 @@ async def add_user_signature_change_to_streams( The new stream ID. """ - # TODO: this looks like it's _writing_. Should this be on DeviceStore rather - # than DeviceWorkerStore? - async with self._device_list_id_gen.get_next() as stream_id: # type: ignore[attr-defined] + async with self._device_list_id_gen.get_next() as stream_id: await self.db_pool.runInteraction( "add_user_sig_change_to_streams", self._add_user_signature_change_txn, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index b7e74981256c..20b7a683622c 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -72,7 +72,6 @@ from synapse.storage.types import Cursor from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, MultiWriterIdGenerator, StreamIdGenerator, ) @@ -187,8 +186,8 @@ def __init__( ): super().__init__(database, db_conn, hs) - self._stream_id_gen: AbstractStreamIdTracker - self._backfill_id_gen: AbstractStreamIdTracker + self._stream_id_gen: AbstractStreamIdGenerator + self._backfill_id_gen: AbstractStreamIdGenerator if isinstance(database.engine, PostgresEngine): # If we're using Postgres than we can use `MultiWriterIdGenerator` # regardless of whether this process writes to the streams or not. diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 9b2bbe060ddc..9f862f00c1c5 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -46,7 +46,6 @@ from synapse.storage.push_rule import InconsistentRuleException, RuleNotFoundException from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, IdGenerator, StreamIdGenerator, ) @@ -118,7 +117,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. 
- self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + self._push_rules_stream_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), "push_rules_stream", diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index fddbc07afa52..9a24f7a65526 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -36,7 +36,6 @@ ) from synapse.storage.util.id_generators import ( AbstractStreamIdGenerator, - AbstractStreamIdTracker, StreamIdGenerator, ) from synapse.types import JsonDict @@ -60,7 +59,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. - self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( + self._pushers_id_gen = StreamIdGenerator( db_conn, hs.get_replication_notifier(), "pushers", diff --git a/synapse/storage/databases/main/receipts.py b/synapse/storage/databases/main/receipts.py index 92a82240ab4c..074942b16785 100644 --- a/synapse/storage/databases/main/receipts.py +++ b/synapse/storage/databases/main/receipts.py @@ -39,7 +39,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.engines._base import IsolationLevel from synapse.storage.util.id_generators import ( - AbstractStreamIdTracker, + AbstractStreamIdGenerator, MultiWriterIdGenerator, StreamIdGenerator, ) @@ -65,7 +65,7 @@ def __init__( # In the worker store this is an ID tracker which we overwrite in the non-worker # class below that is used on the main process. - self._receipts_id_gen: AbstractStreamIdTracker + self._receipts_id_gen: AbstractStreamIdGenerator if isinstance(database.engine, PostgresEngine): self._can_write_to_receipts = ( @@ -768,7 +768,7 @@ async def insert_receipt( "insert_receipt_conv", self._graph_to_linear, room_id, event_ids ) - async with self._receipts_id_gen.get_next() as stream_id: # type: ignore[attr-defined] + async with self._receipts_id_gen.get_next() as stream_id: event_ts = await self.db_pool.runInteraction( "insert_linearized_receipt", self._insert_linearized_receipt_txn, diff --git a/synapse/storage/util/id_generators.py b/synapse/storage/util/id_generators.py index 334d3d718b4b..d2c874b9a8d7 100644 --- a/synapse/storage/util/id_generators.py +++ b/synapse/storage/util/id_generators.py @@ -93,8 +93,11 @@ def _load_current_id( return res -class AbstractStreamIdTracker(metaclass=abc.ABCMeta): - """Tracks the "current" stream ID of a stream that may have multiple writers. +class AbstractStreamIdGenerator(metaclass=abc.ABCMeta): + """Generates or tracks stream IDs for a stream that may have multiple writers. + + Each stream ID represents a write transaction, whose completion is tracked + so that the "current" stream ID of the stream can be determined. Stream IDs are monotonically increasing or decreasing integers representing write transactions. The "current" stream ID is the stream ID such that all transactions @@ -130,16 +133,6 @@ def get_current_token_for_writer(self, instance_name: str) -> int: """ raise NotImplementedError() - -class AbstractStreamIdGenerator(AbstractStreamIdTracker): - """Generates stream IDs for a stream that may have multiple writers. - - Each stream ID represents a write transaction, whose completion is tracked - so that the "current" stream ID of the stream can be determined. - - See `AbstractStreamIdTracker` for more details. 
- """ - @abc.abstractmethod def get_next(self) -> AsyncContextManager[int]: """ From 6b6e91e6106e84c391c9635fc830aa6081119c71 Mon Sep 17 00:00:00 2001 From: 6543 <6543@obermui.de> Date: Fri, 3 Mar 2023 15:22:06 +0100 Subject: [PATCH 272/344] Fix ICU tests on alpine / macOS. (#15177) The word boundary behaviour is slightly different, consider it acceptable for the tests. --- changelog.d/15177.bugfix | 1 + tests/storage/test_user_directory.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/15177.bugfix diff --git a/changelog.d/15177.bugfix b/changelog.d/15177.bugfix new file mode 100644 index 000000000000..b9764947eb2e --- /dev/null +++ b/changelog.d/15177.bugfix @@ -0,0 +1 @@ +Fix test_icu_word_boundary_punctuation for alpine / macos installed ICU versions. diff --git a/tests/storage/test_user_directory.py b/tests/storage/test_user_directory.py index 43b724c4ddd7..8c72aa17225a 100644 --- a/tests/storage/test_user_directory.py +++ b/tests/storage/test_user_directory.py @@ -696,6 +696,8 @@ def test_icu_word_boundary_punctuation(self) -> None: ["lazy'fox", "jumped", "over", "the", "dog"], # ICU 70 on Ubuntu 22.04 ["lazy'fox", "jumped:over", "the.dog"], + # pyicu 2.10.2 on Alpine edge / macOS + ["lazy'fox", "jumped", "over", "the.dog"], ), ) From 242d2a27ce18e682106854f5280566f4ced98c34 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 3 Mar 2023 14:26:14 +0000 Subject: [PATCH 273/344] Use nightly rustfmt in CI (#15188) As we use some nightly only options, e.g. to group and sort imports consistently. --- .github/workflows/tests.yml | 3 ++- changelog.d/15188.misc | 1 + rust/benches/evaluator.rs | 1 + rust/src/push/evaluator.rs | 2 +- 4 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15188.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 48a33c2f49fa..806bd2bfa440 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -156,7 +156,8 @@ jobs: # We pin to a specific commit for paranoia's sake. uses: dtolnay/rust-toolchain@e12eda571dc9a5ee5d58eecf4738ec291c66f295 with: - toolchain: 1.58.1 + # We use nightly so that it correctly groups together imports + toolchain: nightly-2022-12-01 components: rustfmt - uses: Swatinem/rust-cache@v2 diff --git a/changelog.d/15188.misc b/changelog.d/15188.misc new file mode 100644 index 000000000000..e4e9472f0108 --- /dev/null +++ b/changelog.d/15188.misc @@ -0,0 +1 @@ +Use nightly rustfmt in CI. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 7c987d4948e0..44477e63f783 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -14,6 +14,7 @@ #![feature(test)] use std::collections::BTreeSet; + use synapse::push::{ evaluator::PushRuleEvaluator, Condition, EventMatchCondition, FilteredPushRules, JsonValue, PushRules, SimpleJsonValue, diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 55846627cca3..1c2a05ad9a7c 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -15,7 +15,6 @@ use std::borrow::Cow; use std::collections::BTreeMap; -use crate::push::{EventMatchPatternType, JsonValue}; use anyhow::{Context, Error}; use lazy_static::lazy_static; use log::warn; @@ -27,6 +26,7 @@ use super::{ Action, Condition, ExactEventMatchCondition, FilteredPushRules, KnownCondition, SimpleJsonValue, }; +use crate::push::{EventMatchPatternType, JsonValue}; lazy_static! { /// Used to parse the `is` clause in the room member count condition. 
From 95876cf5f13172e8cbd624499a89dd1ae3e1f0dd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 6 Mar 2023 12:01:05 +0000 Subject: [PATCH 274/344] Bump serde_json from 1.0.93 to 1.0.94 (#15214)Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions * Bump serde_json from 1.0.93 to 1.0.94 Bumps [serde_json](https://github.com/serde-rs/json) from 1.0.93 to 1.0.94. - [Release notes](https://github.com/serde-rs/json/releases) - [Commits](https://github.com/serde-rs/json/compare/v1.0.93...v1.0.94) --- updated-dependencies: - dependency-name: serde_json dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 4 ++-- changelog.d/15214.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15214.misc diff --git a/Cargo.lock b/Cargo.lock index 1bf76cb863ce..f858b2107f6f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -343,9 +343,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cad406b69c91885b5107daf2c29572f6c8cdb3c66826821e286c533490c0bc76" +checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea" dependencies = [ "itoa", "ryu", diff --git a/changelog.d/15214.misc b/changelog.d/15214.misc new file mode 100644 index 000000000000..91a8cb9d7288 --- /dev/null +++ b/changelog.d/15214.misc @@ -0,0 +1 @@ +Bump serde_json from 1.0.93 to 1.0.94. From fd9cadcf532ce0dbd005541fe635b214aa6d2438 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 6 Mar 2023 08:38:01 -0500 Subject: [PATCH 275/344] Stabilize support for MSC3758: event_property_is push condition (#15185) This removes the configuration flag & updates the identifiers to use the stable version. --- changelog.d/15185.feature | 1 + rust/benches/evaluator.rs | 4 --- rust/src/push/base_rules.rs | 8 ++--- rust/src/push/evaluator.rs | 36 +++++++-------------- rust/src/push/mod.rs | 36 +++++++++------------ stubs/synapse/synapse_rust/push.pyi | 1 - synapse/config/experimental.py | 8 +---- synapse/push/bulk_push_rule_evaluator.py | 1 - tests/push/test_bulk_push_rule_evaluator.py | 2 -- tests/push/test_push_rule_evaluator.py | 23 +++---------- 10 files changed, 39 insertions(+), 81 deletions(-) create mode 100644 changelog.d/15185.feature diff --git a/changelog.d/15185.feature b/changelog.d/15185.feature new file mode 100644 index 000000000000..901900bdecf7 --- /dev/null +++ b/changelog.d/15185.feature @@ -0,0 +1 @@ +Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): `event_property_is` push condition. 
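For reference, the stable condition format exercised by the tests below looks like the following (an illustrative Python dict; the key and values here are made up):

# An exact-value push rule condition under its stable name. Before this change the
# same condition was only accepted under the unstable name
# "com.beeper.msc3758.exact_event_match" and was gated behind a config flag.
condition = {
    "kind": "event_property_is",
    "key": "content.value",  # dotted path into the event
    "value": "foobaz",  # exact match; booleans, integers and null are also allowed
}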
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 44477e63f783..79b553dbb0d3 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -53,7 +53,6 @@ fn bench_match_exact(b: &mut Bencher) { vec![], false, false, - false, ) .unwrap(); @@ -100,7 +99,6 @@ fn bench_match_word(b: &mut Bencher) { vec![], false, false, - false, ) .unwrap(); @@ -147,7 +145,6 @@ fn bench_match_word_miss(b: &mut Bencher) { vec![], false, false, - false, ) .unwrap(); @@ -194,7 +191,6 @@ fn bench_eval_message(b: &mut Bencher) { vec![], false, false, - false, ) .unwrap(); diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 3d72a4a4c36e..ec8d96656ace 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -24,10 +24,10 @@ use super::KnownCondition; use crate::push::RelatedEventMatchTypeCondition; use crate::push::SetTweak; use crate::push::TweakValue; -use crate::push::{Action, ExactEventMatchCondition, SimpleJsonValue}; +use crate::push::{Action, EventPropertyIsCondition, SimpleJsonValue}; use crate::push::{Condition, EventMatchTypeCondition}; use crate::push::{EventMatchCondition, EventMatchPatternType}; -use crate::push::{ExactEventMatchTypeCondition, PushRule}; +use crate::push::{EventPropertyIsTypeCondition, PushRule}; const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak { set_tweak: Cow::Borrowed("highlight"), @@ -145,7 +145,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ rule_id: Cow::Borrowed(".org.matrix.msc3952.is_user_mention"), priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known( - KnownCondition::ExactEventPropertyContainsType(ExactEventMatchTypeCondition { + KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition { key: Cow::Borrowed("content.org.matrix.msc3952.mentions.user_ids"), value_type: Cow::Borrowed(&EventMatchPatternType::UserId), }), @@ -166,7 +166,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ rule_id: Cow::Borrowed(".org.matrix.msc3952.is_room_mention"), priority_class: 5, conditions: Cow::Borrowed(&[ - Condition::Known(KnownCondition::ExactEventMatch(ExactEventMatchCondition { + Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { key: Cow::Borrowed("content.org.matrix.msc3952.mentions.room"), value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), })), diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 1c2a05ad9a7c..67fe6a4823fe 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -23,7 +23,7 @@ use regex::Regex; use super::{ utils::{get_glob_matcher, get_localpart_from_id, GlobMatchType}, - Action, Condition, ExactEventMatchCondition, FilteredPushRules, KnownCondition, + Action, Condition, EventPropertyIsCondition, FilteredPushRules, KnownCondition, SimpleJsonValue, }; use crate::push::{EventMatchPatternType, JsonValue}; @@ -97,9 +97,6 @@ pub struct PushRuleEvaluator { /// flag as MSC1767 (extensible events core). msc3931_enabled: bool, - /// If MSC3758 (exact_event_match push rule condition) is enabled. - msc3758_exact_event_match: bool, - /// If MSC3966 (exact_event_property_contains push rule condition) is enabled. 
msc3966_exact_event_property_contains: bool, } @@ -119,7 +116,6 @@ impl PushRuleEvaluator { related_event_match_enabled: bool, room_version_feature_flags: Vec, msc3931_enabled: bool, - msc3758_exact_event_match: bool, msc3966_exact_event_property_contains: bool, ) -> Result { let body = match flattened_keys.get("content.body") { @@ -138,7 +134,6 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, - msc3758_exact_event_match, msc3966_exact_event_property_contains, }) } @@ -275,8 +270,8 @@ impl PushRuleEvaluator { self.match_event_match(&self.flattened_keys, &event_match.key, pattern)? } - KnownCondition::ExactEventMatch(exact_event_match) => { - self.match_exact_event_match(exact_event_match)? + KnownCondition::EventPropertyIs(event_property_is) => { + self.match_event_property_is(event_property_is)? } KnownCondition::RelatedEventMatch(event_match) => self.match_related_event_match( &event_match.rel_type.clone(), @@ -306,10 +301,10 @@ impl PushRuleEvaluator { Some(Cow::Borrowed(pattern)), )? } - KnownCondition::ExactEventPropertyContains(exact_event_match) => self + KnownCondition::ExactEventPropertyContains(event_property_is) => self .match_exact_event_property_contains( - exact_event_match.key.clone(), - exact_event_match.value.clone(), + event_property_is.key.clone(), + event_property_is.value.clone(), )?, KnownCondition::ExactEventPropertyContainsType(exact_event_match) => { // The `pattern_type` can either be "user_id" or "user_localpart", @@ -405,20 +400,15 @@ impl PushRuleEvaluator { compiled_pattern.is_match(haystack) } - /// Evaluates a `exact_event_match` condition. (MSC3758) - fn match_exact_event_match( + /// Evaluates a `event_property_is` condition. + fn match_event_property_is( &self, - exact_event_match: &ExactEventMatchCondition, + event_property_is: &EventPropertyIsCondition, ) -> Result { - // First check if the feature is enabled. - if !self.msc3758_exact_event_match { - return Ok(false); - } - - let value = &exact_event_match.value; + let value = &event_property_is.value; let haystack = if let Some(JsonValue::Value(haystack)) = - self.flattened_keys.get(&*exact_event_match.key) + self.flattened_keys.get(&*event_property_is.key) { haystack } else { @@ -464,7 +454,7 @@ impl PushRuleEvaluator { } } - /// Evaluates a `exact_event_property_contains` condition. (MSC3758) + /// Evaluates a `exact_event_property_contains` condition. (MSC3966) fn match_exact_event_property_contains( &self, key: Cow, @@ -526,7 +516,6 @@ fn push_rule_evaluator() { vec![], true, true, - true, ) .unwrap(); @@ -557,7 +546,6 @@ fn test_requires_room_version_supports_condition() { flags, true, true, - true, ) .unwrap(); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 6391d2ed47de..7fde88e8256a 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -331,21 +331,20 @@ pub enum KnownCondition { // Identical to event_match but gives predefined patterns. Cannot be added by users. #[serde(skip_deserializing, rename = "event_match")] EventMatchType(EventMatchTypeCondition), - #[serde(rename = "com.beeper.msc3758.exact_event_match")] - ExactEventMatch(ExactEventMatchCondition), + EventPropertyIs(EventPropertyIsCondition), #[serde(rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatch(RelatedEventMatchCondition), // Identical to related_event_match but gives predefined patterns. Cannot be added by users. 
#[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatchType(RelatedEventMatchTypeCondition), #[serde(rename = "org.matrix.msc3966.exact_event_property_contains")] - ExactEventPropertyContains(ExactEventMatchCondition), + ExactEventPropertyContains(EventPropertyIsCondition), // Identical to exact_event_property_contains but gives predefined patterns. Cannot be added by users. #[serde( skip_deserializing, rename = "org.matrix.msc3966.exact_event_property_contains" )] - ExactEventPropertyContainsType(ExactEventMatchTypeCondition), + ExactEventPropertyContainsType(EventPropertyIsTypeCondition), ContainsDisplayName, RoomMemberCount { #[serde(skip_serializing_if = "Option::is_none")] @@ -395,16 +394,16 @@ pub struct EventMatchTypeCondition { pub pattern_type: Cow<'static, EventMatchPatternType>, } -/// The body of a [`Condition::ExactEventMatch`] +/// The body of a [`Condition::EventPropertyIs`] #[derive(Serialize, Deserialize, Debug, Clone)] -pub struct ExactEventMatchCondition { +pub struct EventPropertyIsCondition { pub key: Cow<'static, str>, pub value: Cow<'static, SimpleJsonValue>, } -/// The body of a [`Condition::ExactEventMatch`] that uses user_id or user_localpart as a pattern. +/// The body of a [`Condition::EventPropertyIs`] that uses user_id or user_localpart as a pattern. #[derive(Serialize, Debug, Clone)] -pub struct ExactEventMatchTypeCondition { +pub struct EventPropertyIsTypeCondition { pub key: Cow<'static, str>, // During serialization, the pattern_type property gets replaced with a // pattern property of the correct value in synapse.push.clientformat.format_push_rules_for_user. @@ -711,44 +710,41 @@ fn test_deserialize_unstable_msc3931_condition() { } #[test] -fn test_deserialize_unstable_msc3758_condition() { +fn test_deserialize_event_property_is_condition() { // A string condition should work. - let json = - r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":"foo"}"#; + let json = r#"{"kind":"event_property_is","key":"content.value","value":"foo"}"#; let condition: Condition = serde_json::from_str(json).unwrap(); assert!(matches!( condition, - Condition::Known(KnownCondition::ExactEventMatch(_)) + Condition::Known(KnownCondition::EventPropertyIs(_)) )); // A boolean condition should work. - let json = - r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":true}"#; + let json = r#"{"kind":"event_property_is","key":"content.value","value":true}"#; let condition: Condition = serde_json::from_str(json).unwrap(); assert!(matches!( condition, - Condition::Known(KnownCondition::ExactEventMatch(_)) + Condition::Known(KnownCondition::EventPropertyIs(_)) )); // An integer condition should work. 
- let json = r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":1}"#; + let json = r#"{"kind":"event_property_is","key":"content.value","value":1}"#; let condition: Condition = serde_json::from_str(json).unwrap(); assert!(matches!( condition, - Condition::Known(KnownCondition::ExactEventMatch(_)) + Condition::Known(KnownCondition::EventPropertyIs(_)) )); // A null condition should work - let json = - r#"{"kind":"com.beeper.msc3758.exact_event_match","key":"content.value","value":null}"#; + let json = r#"{"kind":"event_property_is","key":"content.value","value":null}"#; let condition: Condition = serde_json::from_str(json).unwrap(); assert!(matches!( condition, - Condition::Known(KnownCondition::ExactEventMatch(_)) + Condition::Known(KnownCondition::EventPropertyIs(_)) )); } diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index c17796ffbd6b..c040944aac55 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -65,7 +65,6 @@ class PushRuleEvaluator: related_event_match_enabled: bool, room_version_feature_flags: Tuple[str, ...], msc3931_enabled: bool, - msc3758_exact_event_match: bool, msc3966_exact_event_property_contains: bool, ): ... def run( diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index fc64f2bda11d..9c58cee2c8e3 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -169,11 +169,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3925: do not replace events with their edits self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False) - # MSC3758: exact_event_match push rule condition - self.msc3758_exact_event_match = experimental.get( - "msc3758_exact_event_match", False - ) - # MSC3873: Disambiguate event_match keys. self.msc3873_escape_event_match_key = experimental.get( "msc3873_escape_event_match_key", False @@ -184,10 +179,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "msc3966_exact_event_property_contains", False ) - # MSC3952: Intentional mentions, this depends on MSC3758 and MSC3966. + # MSC3952: Intentional mentions, this depends on MSC3966. 
self.msc3952_intentional_mentions = ( experimental.get("msc3952_intentional_mentions", False) - and self.msc3758_exact_event_match and self.msc3966_exact_event_property_contains ) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index abcf687f0528..ba12b6d79a18 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -413,7 +413,6 @@ async def _action_for_event_by_user( self._related_event_match_enabled, event.room_version.msc3931_push_features, self.hs.config.experimental.msc1767_enabled, # MSC3931 flag - self.hs.config.experimental.msc3758_exact_event_match, self.hs.config.experimental.msc3966_exact_event_property_contains, ) diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index 73fecfd4adf4..c6591c50de76 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -231,7 +231,6 @@ def _create_and_process( @override_config( { "experimental_features": { - "msc3758_exact_event_match": True, "msc3952_intentional_mentions": True, "msc3966_exact_event_property_contains": True, } @@ -335,7 +334,6 @@ def test_user_mentions(self) -> None: @override_config( { "experimental_features": { - "msc3758_exact_event_match": True, "msc3952_intentional_mentions": True, "msc3966_exact_event_property_contains": True, } diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index d4a4bc4d93cb..ff5a9a66f5fe 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -173,7 +173,6 @@ def _get_evaluator( related_event_match_enabled=True, room_version_feature_flags=event.room_version.msc3931_push_features, msc3931_enabled=True, - msc3758_exact_event_match=True, msc3966_exact_event_property_contains=True, ) @@ -404,7 +403,7 @@ def test_exact_event_match_string(self) -> None: # Test against a string value. condition = { - "kind": "com.beeper.msc3758.exact_event_match", + "kind": "event_property_is", "key": "content.value", "value": "foobaz", } @@ -442,11 +441,7 @@ def test_exact_event_match_boolean(self) -> None: """Check that exact_event_match conditions work as expected for booleans.""" # Test against a True boolean value. - condition = { - "kind": "com.beeper.msc3758.exact_event_match", - "key": "content.value", - "value": True, - } + condition = {"kind": "event_property_is", "key": "content.value", "value": True} self._assert_matches( condition, {"value": True}, @@ -466,7 +461,7 @@ def test_exact_event_match_boolean(self) -> None: # Test against a False boolean value. 
condition = { - "kind": "com.beeper.msc3758.exact_event_match", + "kind": "event_property_is", "key": "content.value", "value": False, } @@ -491,11 +486,7 @@ def test_exact_event_match_boolean(self) -> None: def test_exact_event_match_null(self) -> None: """Check that exact_event_match conditions work as expected for null.""" - condition = { - "kind": "com.beeper.msc3758.exact_event_match", - "key": "content.value", - "value": None, - } + condition = {"kind": "event_property_is", "key": "content.value", "value": None} self._assert_matches( condition, {"value": None}, @@ -511,11 +502,7 @@ def test_exact_event_match_null(self) -> None: def test_exact_event_match_integer(self) -> None: """Check that exact_event_match conditions work as expected for integers.""" - condition = { - "kind": "com.beeper.msc3758.exact_event_match", - "key": "content.value", - "value": 1, - } + condition = {"kind": "event_property_is", "key": "content.value", "value": 1} self._assert_matches( condition, {"value": 1}, From 05e0a4089a013979e5d0642f6a0f1d22ad865ee1 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 6 Mar 2023 09:43:01 -0500 Subject: [PATCH 276/344] Stop applying edits to event contents (MSC3925). (#15193) Enables MSC3925 support by default, which: * Includes the full edit event in the bundled aggregations of an edited event. * Stops modifying the original event's content to return the new content from the edit event. This is a backwards-incompatible change that is considered to be "correct" by the spec. --- changelog.d/15193.bugfix | 1 + synapse/config/experimental.py | 3 -- synapse/events/utils.py | 57 +--------------------------- synapse/rest/client/room.py | 2 +- synapse/server.py | 2 +- tests/rest/client/test_relations.py | 59 +++++------------------------ 6 files changed, 15 insertions(+), 109 deletions(-) create mode 100644 changelog.d/15193.bugfix diff --git a/changelog.d/15193.bugfix b/changelog.d/15193.bugfix new file mode 100644 index 000000000000..ca781e9631ec --- /dev/null +++ b/changelog.d/15193.bugfix @@ -0,0 +1 @@ +Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 9c58cee2c8e3..489f2601acfa 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -166,9 +166,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) - # MSC3925: do not replace events with their edits - self.msc3925_inhibit_edit = experimental.get("msc3925_inhibit_edit", False) - # MSC3873: Disambiguate event_match keys. self.msc3873_escape_event_match_key = experimental.get( "msc3873_escape_event_match_key", False diff --git a/synapse/events/utils.py b/synapse/events/utils.py index eaa6cad4afa8..45f46949a18e 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -39,7 +39,6 @@ from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersion from synapse.types import JsonDict -from synapse.util.frozenutils import unfreeze from . import EventBase @@ -403,14 +402,6 @@ class EventClientSerializer: clients. """ - def __init__(self, inhibit_replacement_via_edits: bool = False): - """ - Args: - inhibit_replacement_via_edits: If this is set to True, then events are - never replaced by their edits. 
- """ - self._inhibit_replacement_via_edits = inhibit_replacement_via_edits - def serialize_event( self, event: Union[JsonDict, EventBase], @@ -418,7 +409,6 @@ def serialize_event( *, config: SerializeEventConfig = _DEFAULT_SERIALIZE_EVENT_CONFIG, bundle_aggregations: Optional[Dict[str, "BundledAggregations"]] = None, - apply_edits: bool = True, ) -> JsonDict: """Serializes a single event. @@ -428,10 +418,7 @@ def serialize_event( config: Event serialization config bundle_aggregations: A map from event_id to the aggregations to be bundled into the event. - apply_edits: Whether the content of the event should be modified to reflect - any replacement in `bundle_aggregations[].replace`. - See also the `inhibit_replacement_via_edits` constructor arg: if that is - set to True, then this argument is ignored. + Returns: The serialized event """ @@ -450,38 +437,10 @@ def serialize_event( config, bundle_aggregations, serialized_event, - apply_edits=apply_edits, ) return serialized_event - def _apply_edit( - self, orig_event: EventBase, serialized_event: JsonDict, edit: EventBase - ) -> None: - """Replace the content, preserving existing relations of the serialized event. - - Args: - orig_event: The original event. - serialized_event: The original event, serialized. This is modified. - edit: The event which edits the above. - """ - - # Ensure we take copies of the edit content, otherwise we risk modifying - # the original event. - edit_content = edit.content.copy() - - # Unfreeze the event content if necessary, so that we may modify it below - edit_content = unfreeze(edit_content) - serialized_event["content"] = edit_content.get("m.new_content", {}) - - # Check for existing relations - relates_to = orig_event.content.get("m.relates_to") - if relates_to: - # Keep the relations, ensuring we use a dict copy of the original - serialized_event["content"]["m.relates_to"] = relates_to.copy() - else: - serialized_event["content"].pop("m.relates_to", None) - def _inject_bundled_aggregations( self, event: EventBase, @@ -489,7 +448,6 @@ def _inject_bundled_aggregations( config: SerializeEventConfig, bundled_aggregations: Dict[str, "BundledAggregations"], serialized_event: JsonDict, - apply_edits: bool, ) -> None: """Potentially injects bundled aggregations into the unsigned portion of the serialized event. @@ -504,9 +462,6 @@ def _inject_bundled_aggregations( While serializing the bundled aggregations this map may be searched again for additional events in a recursive manner. serialized_event: The serialized event which may be modified. - apply_edits: Whether the content of the event should be modified to reflect - any replacement in `aggregations.replace` (subject to the - `inhibit_replacement_via_edits` constructor arg). """ # We have already checked that aggregations exist for this event. @@ -522,11 +477,6 @@ def _inject_bundled_aggregations( ] = event_aggregations.references if event_aggregations.replace: - # If there is an edit, optionally apply it to the event. - edit = event_aggregations.replace - if apply_edits and not self._inhibit_replacement_via_edits: - self._apply_edit(event, serialized_event, edit) - # Include information about it in the relations dict. # # Matrix spec v1.5 (https://spec.matrix.org/v1.5/client-server-api/#server-side-aggregation-of-mreplace-relationships) @@ -534,10 +484,7 @@ def _inject_bundled_aggregations( # `sender` of the edit; however MSC3925 proposes extending it to the whole # of the edit, which is what we do here. 
serialized_aggregations[RelationTypes.REPLACE] = self.serialize_event( - edit, - time_now, - config=config, - apply_edits=False, + event_aggregations.replace, time_now, config=config ) # Include any threaded replies to this event. diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 45aee3d3fe39..c5af07816a2b 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -818,7 +818,7 @@ async def on_GET( # per MSC2676, /rooms/{roomId}/event/{eventId}, should return the # *original* event, rather than the edited version event_dict = self._event_serializer.serialize_event( - event, time_now, bundle_aggregations=aggregations, apply_edits=False + event, time_now, bundle_aggregations=aggregations ) return 200, event_dict diff --git a/synapse/server.py b/synapse/server.py index a7c32e9a6092..df80fc1bebdc 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -743,7 +743,7 @@ def get_oidc_handler(self) -> "OidcHandler": @cache_in_self def get_event_client_serializer(self) -> EventClientSerializer: - return EventClientSerializer(self.config.experimental.msc3925_inhibit_edit) + return EventClientSerializer() @cache_in_self def get_password_policy_handler(self) -> PasswordPolicyHandler: diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index a8a0a1614104..fbbbcb23f1bd 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -30,7 +30,6 @@ from tests.server import FakeChannel from tests.test_utils import make_awaitable from tests.test_utils.event_injection import inject_event -from tests.unittest import override_config class BaseRelationsTestCase(unittest.HomeserverTestCase): @@ -403,7 +402,7 @@ def _assert_edit_bundle( def test_edit(self) -> None: """Test that a simple edit works.""" - + orig_body = {"body": "Hi!", "msgtype": "m.text"} new_body = {"msgtype": "m.text", "body": "I've been edited!"} edit_event_content = { "msgtype": "m.text", @@ -424,9 +423,7 @@ def test_edit(self) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"} - ) + self.assertEqual(channel.json_body["content"], orig_body) self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content) # Request the room messages. @@ -443,7 +440,7 @@ def test_edit(self) -> None: ) # Request the room context. - # /context should return the edited event. + # /context should return the event. channel = self.make_request( "GET", f"/rooms/{self.room}/context/{self.parent_id}", @@ -453,7 +450,7 @@ def test_edit(self) -> None: self._assert_edit_bundle( channel.json_body["event"], edit_event_id, edit_event_content ) - self.assertEqual(channel.json_body["event"]["content"], new_body) + self.assertEqual(channel.json_body["event"]["content"], orig_body) # Request sync, but limit the timeline so it becomes limited (and includes # bundled aggregations). @@ -491,45 +488,11 @@ def test_edit(self) -> None: edit_event_content, ) - @override_config({"experimental_features": {"msc3925_inhibit_edit": True}}) - def test_edit_inhibit_replace(self) -> None: - """ - If msc3925_inhibit_edit is enabled, then the original event should not be - replaced. 
- """ - - new_body = {"msgtype": "m.text", "body": "I've been edited!"} - edit_event_content = { - "msgtype": "m.text", - "body": "foo", - "m.new_content": new_body, - } - channel = self._send_relation( - RelationTypes.REPLACE, - "m.room.message", - content=edit_event_content, - ) - edit_event_id = channel.json_body["event_id"] - - # /context should return the *original* event. - channel = self.make_request( - "GET", - f"/rooms/{self.room}/context/{self.parent_id}", - access_token=self.user_token, - ) - self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["event"]["content"], {"body": "Hi!", "msgtype": "m.text"} - ) - self._assert_edit_bundle( - channel.json_body["event"], edit_event_id, edit_event_content - ) - def test_multi_edit(self) -> None: """Test that multiple edits, including attempts by people who shouldn't be allowed, are correctly handled. """ - + orig_body = orig_body = {"body": "Hi!", "msgtype": "m.text"} self._send_relation( RelationTypes.REPLACE, "m.room.message", @@ -570,7 +533,7 @@ def test_multi_edit(self) -> None: ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(channel.json_body["event"]["content"], new_body) + self.assertEqual(channel.json_body["event"]["content"], orig_body) self._assert_edit_bundle( channel.json_body["event"], edit_event_id, edit_event_content ) @@ -642,6 +605,7 @@ def test_edit_reply(self) -> None: def test_edit_edit(self) -> None: """Test that an edit cannot be edited.""" + orig_body = {"body": "Hi!", "msgtype": "m.text"} new_body = {"msgtype": "m.text", "body": "Initial edit"} edit_event_content = { "msgtype": "m.text", @@ -675,14 +639,12 @@ def test_edit_edit(self) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual( - channel.json_body["content"], {"body": "Hi!", "msgtype": "m.text"} - ) + self.assertEqual(channel.json_body["content"], orig_body) # The relations information should not include the edit to the edit. self._assert_edit_bundle(channel.json_body, edit_event_id, edit_event_content) - # /context should return the event updated for the *first* edit + # /context should return the bundled edit for the *first* edit # (The edit to the edit should be ignored.) channel = self.make_request( "GET", @@ -690,7 +652,7 @@ def test_edit_edit(self) -> None: access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) - self.assertEqual(channel.json_body["event"]["content"], new_body) + self.assertEqual(channel.json_body["event"]["content"], orig_body) self._assert_edit_bundle( channel.json_body["event"], edit_event_id, edit_event_content ) @@ -1287,7 +1249,6 @@ def test_thread_edit_latest_event(self) -> None: thread_summary = relations_dict[RelationTypes.THREAD] self.assertIn("latest_event", thread_summary) latest_event_in_thread = thread_summary["latest_event"] - self.assertEqual(latest_event_in_thread["content"]["body"], "I've been edited!") # The latest event in the thread should have the edit appear under the # bundled aggregations. self.assertDictContainsSubset( From 41f127e06861230024f43aa4ce272116dc886700 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Mon, 6 Mar 2023 17:08:39 +0100 Subject: [PATCH 277/344] Pass the requester during event serialization. (#15174) This allows Synapse to properly include the transaction ID in the unsigned data of events. 
--- changelog.d/15174.bugfix | 1 + synapse/events/utils.py | 30 +++++++++++----- synapse/handlers/events.py | 20 +++++------ synapse/handlers/initial_sync.py | 51 ++++++++++++++++++++-------- synapse/handlers/message.py | 9 +++-- synapse/handlers/pagination.py | 4 ++- synapse/handlers/relations.py | 12 +++++-- synapse/handlers/search.py | 43 +++++++++++++++-------- synapse/rest/client/events.py | 16 +++++---- synapse/rest/client/notifications.py | 12 ++++--- synapse/rest/client/room.py | 18 +++++++--- synapse/rest/client/sync.py | 10 +++--- 12 files changed, 151 insertions(+), 75 deletions(-) create mode 100644 changelog.d/15174.bugfix diff --git a/changelog.d/15174.bugfix b/changelog.d/15174.bugfix new file mode 100644 index 000000000000..a0c70cbe2252 --- /dev/null +++ b/changelog.d/15174.bugfix @@ -0,0 +1 @@ +Add the `transaction_id` in the events included in many endpoints responses. diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 45f46949a18e..b9c15ffcdb36 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -38,7 +38,7 @@ ) from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import RoomVersion -from synapse.types import JsonDict +from synapse.types import JsonDict, Requester from . import EventBase @@ -316,8 +316,9 @@ class SerializeEventConfig: as_client_event: bool = True # Function to convert from federation format to client format event_format: Callable[[JsonDict], JsonDict] = format_event_for_client_v1 - # ID of the user's auth token - used for namespacing of transaction IDs - token_id: Optional[int] = None + # The entity that requested the event. This is used to determine whether to include + # the transaction_id in the unsigned section of the event. + requester: Optional[Requester] = None # List of event fields to include. If empty, all fields will be returned. only_event_fields: Optional[List[str]] = None # Some events can have stripped room state stored in the `unsigned` field. @@ -367,11 +368,24 @@ def serialize_event( e.unsigned["redacted_because"], time_now_ms, config=config ) - if config.token_id is not None: - if config.token_id == getattr(e.internal_metadata, "token_id", None): - txn_id = getattr(e.internal_metadata, "txn_id", None) - if txn_id is not None: - d["unsigned"]["transaction_id"] = txn_id + # If we have a txn_id saved in the internal_metadata, we should include it in the + # unsigned section of the event if it was sent by the same session as the one + # requesting the event. + # There is a special case for guests, because they only have one access token + # without associated access_token_id, so we always include the txn_id for events + # they sent. + txn_id = getattr(e.internal_metadata, "txn_id", None) + if txn_id is not None and config.requester is not None: + event_token_id = getattr(e.internal_metadata, "token_id", None) + if config.requester.user.to_string() == e.sender and ( + ( + event_token_id is not None + and config.requester.access_token_id is not None + and event_token_id == config.requester.access_token_id + ) + or config.requester.is_guest + ): + d["unsigned"]["transaction_id"] = txn_id # invite_room_state and knock_room_state are a list of stripped room state events # that are meant to provide metadata about a room to an invitee/knocker. 
They are diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 949b69cb4189..68c07f0265c9 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -23,7 +23,7 @@ from synapse.handlers.presence import format_user_presence_state from synapse.storage.databases.main.events_worker import EventRedactBehaviour from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, Requester, UserID from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -46,13 +46,12 @@ def __init__(self, hs: "HomeServer"): async def get_stream( self, - auth_user_id: str, + requester: Requester, pagin_config: PaginationConfig, timeout: int = 0, as_client_event: bool = True, affect_presence: bool = True, room_id: Optional[str] = None, - is_guest: bool = False, ) -> JsonDict: """Fetches the events stream for a given user.""" @@ -62,13 +61,12 @@ async def get_stream( raise SynapseError(403, "This room has been blocked on this server") # send any outstanding server notices to the user. - await self._server_notices_sender.on_user_syncing(auth_user_id) + await self._server_notices_sender.on_user_syncing(requester.user.to_string()) - auth_user = UserID.from_string(auth_user_id) presence_handler = self.hs.get_presence_handler() context = await presence_handler.user_syncing( - auth_user_id, + requester.user.to_string(), affect_presence=affect_presence, presence_state=PresenceState.ONLINE, ) @@ -82,10 +80,10 @@ async def get_stream( timeout = random.randint(int(timeout * 0.9), int(timeout * 1.1)) stream_result = await self.notifier.get_events_for( - auth_user, + requester.user, pagin_config, timeout, - is_guest=is_guest, + is_guest=requester.is_guest, explicit_room_id=room_id, ) events = stream_result.events @@ -102,7 +100,7 @@ async def get_stream( if event.membership != Membership.JOIN: continue # Send down presence. - if event.state_key == auth_user_id: + if event.state_key == requester.user.to_string(): # Send down presence for everyone in the room. 
users: Iterable[str] = await self.store.get_users_in_room( event.room_id @@ -124,7 +122,9 @@ async def get_stream( chunks = self._event_serializer.serialize_events( events, time_now, - config=SerializeEventConfig(as_client_event=as_client_event), + config=SerializeEventConfig( + as_client_event=as_client_event, requester=requester + ), ) chunk = { diff --git a/synapse/handlers/initial_sync.py b/synapse/handlers/initial_sync.py index aead0b44b9fd..b3be7a86f0df 100644 --- a/synapse/handlers/initial_sync.py +++ b/synapse/handlers/initial_sync.py @@ -318,11 +318,9 @@ async def room_initial_sync( ) is_peeking = member_event_id is None - user_id = requester.user.to_string() - if membership == Membership.JOIN: result = await self._room_initial_sync_joined( - user_id, room_id, pagin_config, membership, is_peeking + requester, room_id, pagin_config, membership, is_peeking ) elif membership == Membership.LEAVE: # The member_event_id will always be available if membership is set @@ -330,10 +328,16 @@ async def room_initial_sync( assert member_event_id result = await self._room_initial_sync_parted( - user_id, room_id, pagin_config, membership, member_event_id, is_peeking + requester, + room_id, + pagin_config, + membership, + member_event_id, + is_peeking, ) account_data_events = [] + user_id = requester.user.to_string() tags = await self.store.get_tags_for_room(user_id, room_id) if tags: account_data_events.append( @@ -350,7 +354,7 @@ async def room_initial_sync( async def _room_initial_sync_parted( self, - user_id: str, + requester: Requester, room_id: str, pagin_config: PaginationConfig, membership: str, @@ -369,13 +373,17 @@ async def _room_initial_sync_parted( ) messages = await filter_events_for_client( - self._storage_controllers, user_id, messages, is_peeking=is_peeking + self._storage_controllers, + requester.user.to_string(), + messages, + is_peeking=is_peeking, ) start_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, token) end_token = StreamToken.START.copy_and_replace(StreamKeyType.ROOM, stream_token) time_now = self.clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) return { "membership": membership, @@ -383,14 +391,18 @@ async def _room_initial_sync_parted( "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events(messages, time_now) + self._event_serializer.serialize_events( + messages, time_now, config=serialize_options + ) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), }, "state": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events(room_state.values(), time_now) + self._event_serializer.serialize_events( + room_state.values(), time_now, config=serialize_options + ) ), "presence": [], "receipts": [], @@ -398,7 +410,7 @@ async def _room_initial_sync_parted( async def _room_initial_sync_joined( self, - user_id: str, + requester: Requester, room_id: str, pagin_config: PaginationConfig, membership: str, @@ -410,9 +422,12 @@ async def _room_initial_sync_joined( # TODO: These concurrently time_now = self.clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) # Don't bundle aggregations as this is a deprecated API. 
state = self._event_serializer.serialize_events( - current_state.values(), time_now + current_state.values(), + time_now, + config=serialize_options, ) now_token = self.hs.get_event_sources().get_current_token() @@ -450,7 +465,10 @@ async def get_receipts() -> List[JsonDict]: if not receipts: return [] - return ReceiptEventSource.filter_out_private_receipts(receipts, user_id) + return ReceiptEventSource.filter_out_private_receipts( + receipts, + requester.user.to_string(), + ) presence, receipts, (messages, token) = await make_deferred_yieldable( gather_results( @@ -469,20 +487,23 @@ async def get_receipts() -> List[JsonDict]: ) messages = await filter_events_for_client( - self._storage_controllers, user_id, messages, is_peeking=is_peeking + self._storage_controllers, + requester.user.to_string(), + messages, + is_peeking=is_peeking, ) start_token = now_token.copy_and_replace(StreamKeyType.ROOM, token) end_token = now_token - time_now = self.clock.time_msec() - ret = { "room_id": room_id, "messages": { "chunk": ( # Don't bundle aggregations as this is a deprecated API. - self._event_serializer.serialize_events(messages, time_now) + self._event_serializer.serialize_events( + messages, time_now, config=serialize_options + ) ), "start": await start_token.to_string(self.store), "end": await end_token.to_string(self.store), diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e433d6b01f02..da129ec16a4a 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -50,7 +50,7 @@ from synapse.events import EventBase, relation_from_event from synapse.events.builder import EventBuilder from synapse.events.snapshot import EventContext, UnpersistedEventContextBase -from synapse.events.utils import maybe_upsert_event_field +from synapse.events.utils import SerializeEventConfig, maybe_upsert_event_field from synapse.events.validator import EventValidator from synapse.handlers.directory import DirectoryHandler from synapse.logging import opentracing @@ -245,8 +245,11 @@ async def get_state_events( ) room_state = room_state_events[membership_event_id] - now = self.clock.time_msec() - events = self._event_serializer.serialize_events(room_state.values(), now) + events = self._event_serializer.serialize_events( + room_state.values(), + self.clock.time_msec(), + config=SerializeEventConfig(requester=requester), + ) return events async def _user_can_see_state_at_event( diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index ceefa16b4981..8c79c055ba3e 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -579,7 +579,9 @@ async def get_messages( time_now = self.clock.time_msec() - serialize_options = SerializeEventConfig(as_client_event=as_client_event) + serialize_options = SerializeEventConfig( + as_client_event=as_client_event, requester=requester + ) chunk = { "chunk": ( diff --git a/synapse/handlers/relations.py b/synapse/handlers/relations.py index 553053b69469..1d09fdf13519 100644 --- a/synapse/handlers/relations.py +++ b/synapse/handlers/relations.py @@ -20,6 +20,7 @@ from synapse.api.constants import Direction, EventTypes, RelationTypes from synapse.api.errors import SynapseError from synapse.events import EventBase, relation_from_event +from synapse.events.utils import SerializeEventConfig from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import trace from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent @@ -151,16 
+152,23 @@ async def get_relations( ) now = self._clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) return_value: JsonDict = { "chunk": self._event_serializer.serialize_events( - events, now, bundle_aggregations=aggregations + events, + now, + bundle_aggregations=aggregations, + config=serialize_options, ), } if include_original_event: # Do not bundle aggregations when retrieving the original event because # we want the content before relations are applied to it. return_value["original_event"] = self._event_serializer.serialize_event( - event, now, bundle_aggregations=None + event, + now, + bundle_aggregations=None, + config=serialize_options, ) if next_token: diff --git a/synapse/handlers/search.py b/synapse/handlers/search.py index 9bbf83047dd6..aad4706f148a 100644 --- a/synapse/handlers/search.py +++ b/synapse/handlers/search.py @@ -23,7 +23,8 @@ from synapse.api.errors import NotFoundError, SynapseError from synapse.api.filtering import Filter from synapse.events import EventBase -from synapse.types import JsonDict, StrCollection, StreamKeyType, UserID +from synapse.events.utils import SerializeEventConfig +from synapse.types import JsonDict, Requester, StrCollection, StreamKeyType, UserID from synapse.types.state import StateFilter from synapse.visibility import filter_events_for_client @@ -109,12 +110,12 @@ async def get_old_rooms_from_upgraded_room(self, room_id: str) -> Iterable[str]: return historical_room_ids async def search( - self, user: UserID, content: JsonDict, batch: Optional[str] = None + self, requester: Requester, content: JsonDict, batch: Optional[str] = None ) -> JsonDict: """Performs a full text search for a user. Args: - user: The user performing the search. + requester: The user performing the search. content: Search parameters batch: The next_batch parameter. Used for pagination. @@ -199,7 +200,7 @@ async def search( ) return await self._search( - user, + requester, batch_group, batch_group_key, batch_token, @@ -217,7 +218,7 @@ async def search( async def _search( self, - user: UserID, + requester: Requester, batch_group: Optional[str], batch_group_key: Optional[str], batch_token: Optional[str], @@ -235,7 +236,7 @@ async def _search( """Performs a full text search for a user. Args: - user: The user performing the search. + requester: The user performing the search. batch_group: Pagination information. batch_group_key: Pagination information. batch_token: Pagination information. @@ -269,7 +270,7 @@ async def _search( # TODO: Search through left rooms too rooms = await self.store.get_rooms_for_local_user_where_membership_is( - user.to_string(), + requester.user.to_string(), membership_list=[Membership.JOIN], # membership_list=[Membership.JOIN, Membership.LEAVE, Membership.Ban], ) @@ -303,13 +304,13 @@ async def _search( if order_by == "rank": search_result, sender_group = await self._search_by_rank( - user, room_ids, search_term, keys, search_filter + requester.user, room_ids, search_term, keys, search_filter ) # Unused return values for rank search. global_next_batch = None elif order_by == "recent": search_result, global_next_batch = await self._search_by_recent( - user, + requester.user, room_ids, search_term, keys, @@ -334,7 +335,7 @@ async def _search( assert after_limit is not None contexts = await self._calculate_event_contexts( - user, + requester.user, search_result.allowed_events, before_limit, after_limit, @@ -363,27 +364,37 @@ async def _search( # The returned events. 
search_result.allowed_events, ), - user.to_string(), + requester.user.to_string(), ) # We're now about to serialize the events. We should not make any # blocking calls after this. Otherwise, the 'age' will be wrong. time_now = self.clock.time_msec() + serialize_options = SerializeEventConfig(requester=requester) for context in contexts.values(): context["events_before"] = self._event_serializer.serialize_events( - context["events_before"], time_now, bundle_aggregations=aggregations + context["events_before"], + time_now, + bundle_aggregations=aggregations, + config=serialize_options, ) context["events_after"] = self._event_serializer.serialize_events( - context["events_after"], time_now, bundle_aggregations=aggregations + context["events_after"], + time_now, + bundle_aggregations=aggregations, + config=serialize_options, ) results = [ { "rank": search_result.rank_map[e.event_id], "result": self._event_serializer.serialize_event( - e, time_now, bundle_aggregations=aggregations + e, + time_now, + bundle_aggregations=aggregations, + config=serialize_options, ), "context": contexts.get(e.event_id, {}), } @@ -398,7 +409,9 @@ async def _search( if state_results: rooms_cat_res["state"] = { - room_id: self._event_serializer.serialize_events(state_events, time_now) + room_id: self._event_serializer.serialize_events( + state_events, time_now, config=serialize_options + ) for room_id, state_events in state_results.items() } diff --git a/synapse/rest/client/events.py b/synapse/rest/client/events.py index 782e7d14e867..694d77d28730 100644 --- a/synapse/rest/client/events.py +++ b/synapse/rest/client/events.py @@ -17,6 +17,7 @@ from typing import TYPE_CHECKING, Dict, List, Tuple, Union from synapse.api.errors import SynapseError +from synapse.events.utils import SerializeEventConfig from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_string from synapse.http.site import SynapseRequest @@ -43,9 +44,8 @@ def __init__(self, hs: "HomeServer"): async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request, allow_guest=True) - is_guest = requester.is_guest args: Dict[bytes, List[bytes]] = request.args # type: ignore - if is_guest: + if requester.is_guest: if b"room_id" not in args: raise SynapseError(400, "Guest users must specify room_id param") room_id = parse_string(request, "room_id") @@ -63,13 +63,12 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: as_client_event = b"raw" not in args chunk = await self.event_stream_handler.get_stream( - requester.user.to_string(), + requester, pagin_config, timeout=timeout, as_client_event=as_client_event, - affect_presence=(not is_guest), + affect_presence=(not requester.is_guest), room_id=room_id, - is_guest=is_guest, ) return 200, chunk @@ -91,9 +90,12 @@ async def on_GET( requester = await self.auth.get_user_by_req(request) event = await self.event_handler.get_event(requester.user, None, event_id) - time_now = self.clock.time_msec() if event: - result = self._event_serializer.serialize_event(event, time_now) + result = self._event_serializer.serialize_event( + event, + self.clock.time_msec(), + config=SerializeEventConfig(requester=requester), + ) return 200, result else: return 404, "Event not found." 
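The rule introduced by the `SerializeEventConfig.requester` change above can be summarised with a small stand-alone sketch (a hypothetical helper; in the patch the logic lives inline in `serialize_event`):

def should_echo_transaction_id(event, requester) -> bool:
    # A txn_id is only echoed back (as unsigned.transaction_id) to the session
    # that originally sent the event.
    txn_id = getattr(event.internal_metadata, "txn_id", None)
    if txn_id is None or requester is None:
        return False
    if requester.user.to_string() != event.sender:
        return False
    event_token_id = getattr(event.internal_metadata, "token_id", None)
    if (
        event_token_id is not None
        and requester.access_token_id is not None
        and event_token_id == requester.access_token_id
    ):
        # Same (non-guest) access token as the one used to send the event.
        return True
    # Guests have a single access token with no associated token_id, so matching
    # on the sender alone is enough for them.
    return requester.is_guest

Each handler and servlet touched by the rest of this patch simply threads the `Requester` through to the serializer so that this check can be applied.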
diff --git a/synapse/rest/client/notifications.py b/synapse/rest/client/notifications.py index 61268e3af1e2..ea1004256918 100644 --- a/synapse/rest/client/notifications.py +++ b/synapse/rest/client/notifications.py @@ -72,6 +72,12 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: next_token = None + serialize_options = SerializeEventConfig( + event_format=format_event_for_client_v2_without_room_id, + requester=requester, + ) + now = self.clock.time_msec() + for pa in push_actions: returned_pa = { "room_id": pa.room_id, @@ -81,10 +87,8 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "event": ( self._event_serializer.serialize_event( notif_events[pa.event_id], - self.clock.time_msec(), - config=SerializeEventConfig( - event_format=format_event_for_client_v2_without_room_id - ), + now, + config=serialize_options, ) ), } diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index c5af07816a2b..61e4cf0213a3 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -37,7 +37,7 @@ UnredactedContentDeletedError, ) from synapse.api.filtering import Filter -from synapse.events.utils import format_event_for_client_v2 +from synapse.events.utils import SerializeEventConfig, format_event_for_client_v2 from synapse.http.server import HttpServer from synapse.http.servlet import ( ResolveRoomIdMixin, @@ -814,11 +814,13 @@ async def on_GET( [event], requester.user.to_string() ) - time_now = self.clock.time_msec() # per MSC2676, /rooms/{roomId}/event/{eventId}, should return the # *original* event, rather than the edited version event_dict = self._event_serializer.serialize_event( - event, time_now, bundle_aggregations=aggregations + event, + self.clock.time_msec(), + bundle_aggregations=aggregations, + config=SerializeEventConfig(requester=requester), ) return 200, event_dict @@ -863,24 +865,30 @@ async def on_GET( raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND) time_now = self.clock.time_msec() + serializer_options = SerializeEventConfig(requester=requester) results = { "events_before": self._event_serializer.serialize_events( event_context.events_before, time_now, bundle_aggregations=event_context.aggregations, + config=serializer_options, ), "event": self._event_serializer.serialize_event( event_context.event, time_now, bundle_aggregations=event_context.aggregations, + config=serializer_options, ), "events_after": self._event_serializer.serialize_events( event_context.events_after, time_now, bundle_aggregations=event_context.aggregations, + config=serializer_options, ), "state": self._event_serializer.serialize_events( - event_context.state, time_now + event_context.state, + time_now, + config=serializer_options, ), "start": event_context.start, "end": event_context.end, @@ -1192,7 +1200,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: content = parse_json_object_from_request(request) batch = parse_string(request, "next_batch") - results = await self.search_handler.search(requester.user, content, batch) + results = await self.search_handler.search(requester, content, batch) return 200, results diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 8fcb8ac3d9d4..e578b26fa3d9 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -38,7 +38,7 @@ from synapse.http.servlet import RestServlet, parse_boolean, parse_integer, parse_string from synapse.http.site import SynapseRequest from synapse.logging.opentracing import 
trace_with_opname -from synapse.types import JsonDict, StreamToken +from synapse.types import JsonDict, Requester, StreamToken from synapse.util import json_decoder from ._base import client_patterns, set_timeline_upper_limit @@ -226,7 +226,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # We know that the the requester has an access token since appservices # cannot use sync. response_content = await self.encode_response( - time_now, sync_result, requester.access_token_id, filter_collection + time_now, sync_result, requester, filter_collection ) logger.debug("Event formatting complete") @@ -237,7 +237,7 @@ async def encode_response( self, time_now: int, sync_result: SyncResult, - access_token_id: Optional[int], + requester: Requester, filter: FilterCollection, ) -> JsonDict: logger.debug("Formatting events in sync response") @@ -250,12 +250,12 @@ async def encode_response( serialize_options = SerializeEventConfig( event_format=event_formatter, - token_id=access_token_id, + requester=requester, only_event_fields=filter.event_fields, ) stripped_serialize_options = SerializeEventConfig( event_format=event_formatter, - token_id=access_token_id, + requester=requester, include_stripped_room_state=True, ) From c69aae94cda9b62b2a82584b2f5ee72a95feb435 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 7 Mar 2023 08:51:34 +0000 Subject: [PATCH 278/344] Split up txn for fetching device keys (#15215) We look up keys in batches, but we should do that outside of the transaction to avoid starving the database pool. --- changelog.d/15215.misc | 1 + synapse/storage/database.py | 10 +++++++- .../storage/databases/main/end_to_end_keys.py | 24 ++++++++++++------- 3 files changed, 26 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15215.misc diff --git a/changelog.d/15215.misc b/changelog.d/15215.misc new file mode 100644 index 000000000000..fe52a56a7e5e --- /dev/null +++ b/changelog.d/15215.misc @@ -0,0 +1 @@ +Refactor database transaction for query users' devices to reduce database pool contention. diff --git a/synapse/storage/database.py b/synapse/storage/database.py index feaa6cdd073b..5efe31aa19bc 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -672,7 +672,15 @@ def new_transaction( f = cast(types.FunctionType, func) # type: ignore[redundant-cast] if f.__closure__: for i, cell in enumerate(f.__closure__): - if inspect.isgenerator(cell.cell_contents): + try: + contents = cell.cell_contents + except ValueError: + # cell.cell_contents can raise if the "cell" is empty, + # which indicates that the variable is currently + # unbound. 
+ continue + + if inspect.isgenerator(contents): logger.error( "Programming error: function %s references generator %s " "via its closure", diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index b9c39b171810..a3b6c8ae8e82 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -244,9 +244,7 @@ async def get_e2e_device_keys_and_signatures( set_tag("include_all_devices", include_all_devices) set_tag("include_deleted_devices", include_deleted_devices) - result = await self.db_pool.runInteraction( - "get_e2e_device_keys", - self._get_e2e_device_keys_txn, + result = await self._get_e2e_device_keys( query_list, include_all_devices, include_deleted_devices, @@ -285,9 +283,8 @@ async def get_e2e_device_keys_and_signatures( log_kv(result) return result - def _get_e2e_device_keys_txn( + async def _get_e2e_device_keys( self, - txn: LoggingTransaction, query_list: Collection[Tuple[str, Optional[str]]], include_all_devices: bool = False, include_deleted_devices: bool = False, @@ -319,7 +316,7 @@ def _get_e2e_device_keys_txn( if user_list: user_id_in_list_clause, user_args = make_in_list_sql_clause( - txn.database_engine, "user_id", user_list + self.database_engine, "user_id", user_list ) query_clauses.append(user_id_in_list_clause) query_params_list.append(user_args) @@ -332,13 +329,16 @@ def _get_e2e_device_keys_txn( user_device_id_in_list_clause, user_device_args, ) = make_tuple_in_list_sql_clause( - txn.database_engine, ("user_id", "device_id"), user_device_batch + self.database_engine, ("user_id", "device_id"), user_device_batch ) query_clauses.append(user_device_id_in_list_clause) query_params_list.append(user_device_args) result: Dict[str, Dict[str, Optional[DeviceKeyLookupResult]]] = {} - for query_clause, query_params in zip(query_clauses, query_params_list): + + def get_e2e_device_keys_txn( + txn: LoggingTransaction, query_clause: str, query_params: list + ) -> None: sql = ( "SELECT user_id, device_id, " " d.display_name, " @@ -361,6 +361,14 @@ def _get_e2e_device_keys_txn( display_name, db_to_json(key_json) if key_json else None ) + for query_clause, query_params in zip(query_clauses, query_params_list): + await self.db_pool.runInteraction( + "_get_e2e_device_keys", + get_e2e_device_keys_txn, + query_clause, + query_params, + ) + if include_deleted_devices: for user_id, device_id in deleted_devices: if device_id is None: From c114befd6b2b6d6a86d97c03f09856cfeb70ca0d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 10:13:10 +0000 Subject: [PATCH 279/344] Bump types-commonmark from 0.9.2.1 to 0.9.2.2 (#15209) --- changelog.d/15209.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15209.misc diff --git a/changelog.d/15209.misc b/changelog.d/15209.misc new file mode 100644 index 000000000000..cb361353a8b1 --- /dev/null +++ b/changelog.d/15209.misc @@ -0,0 +1 @@ +Bump types-commonmark from 0.9.2.1 to 0.9.2.2. 
diff --git a/poetry.lock b/poetry.lock index cd3dc6fdcd7e..e03d6a60dbe6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2575,14 +2575,14 @@ files = [ [[package]] name = "types-commonmark" -version = "0.9.2.1" +version = "0.9.2.2" description = "Typing stubs for commonmark" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-commonmark-0.9.2.1.tar.gz", hash = "sha256:db8277e6aeb83429265eccece98a24954a9a502dde7bc7cf840a8741abd96b86"}, - {file = "types_commonmark-0.9.2.1-py3-none-any.whl", hash = "sha256:9d5f500cb7eced801bde728137b0a10667bd853d328db641d03141f189e3aab4"}, + {file = "types-commonmark-0.9.2.2.tar.gz", hash = "sha256:f3259350634c2ce68ae503398430482f7cf44e5cae3d344995e916fbf453b4be"}, + {file = "types_commonmark-0.9.2.2-py3-none-any.whl", hash = "sha256:d3d878692615e7fbe47bf19ba67497837b135812d665012a3d42219c1f2c3a61"}, ] [[package]] From 89ae8ce7cacfceefe42190ecdbbc9ec7259d1e6d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 10:13:24 +0000 Subject: [PATCH 280/344] Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8 (#15210) --- changelog.d/15210.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15210.misc diff --git a/changelog.d/15210.misc b/changelog.d/15210.misc new file mode 100644 index 000000000000..e1f64b6d6332 --- /dev/null +++ b/changelog.d/15210.misc @@ -0,0 +1 @@ +Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. diff --git a/poetry.lock b/poetry.lock index e03d6a60dbe6..430566df97bd 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2663,14 +2663,14 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.4" +version = "2.9.21.8" description = "Typing stubs for psycopg2" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-psycopg2-2.9.21.4.tar.gz", hash = "sha256:d43dda166a70d073ddac40718e06539836b5844c99b58ef8d4489a8df2edf5c0"}, - {file = "types_psycopg2-2.9.21.4-py3-none-any.whl", hash = "sha256:6a05dca0856996aa37d7abe436751803bf47ec006cabbefea092e057f23bc95d"}, + {file = "types-psycopg2-2.9.21.8.tar.gz", hash = "sha256:b629440ffcfdebd742fab07f777ff69aefdd19394a138c18e921a1964c3cf5f6"}, + {file = "types_psycopg2-2.9.21.8-py3-none-any.whl", hash = "sha256:e747fbec6e0e2502b625bc7686d13cc62fc170e8ae920e5ba27fac946778eeb9"}, ] [[package]] From a9478e436e1a07942d1114fa0b41a3e455423cf8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 10:13:51 +0000 Subject: [PATCH 281/344] Bump types-setuptools from 67.4.0.3 to 67.5.0.0 (#15212) --- changelog.d/15212.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15212.misc diff --git a/changelog.d/15212.misc b/changelog.d/15212.misc new file mode 100644 index 000000000000..36c593f67d5c --- /dev/null +++ b/changelog.d/15212.misc @@ -0,0 +1 @@ +Bump types-setuptools from 67.4.0.3 to 67.5.0.0. 
diff --git a/poetry.lock b/poetry.lock index 430566df97bd..52b95460597e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2717,14 +2717,14 @@ types-urllib3 = "<1.27" [[package]] name = "types-setuptools" -version = "67.4.0.3" +version = "67.5.0.0" description = "Typing stubs for setuptools" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-setuptools-67.4.0.3.tar.gz", hash = "sha256:19e958dfdbf1c5a628e54c2a7ee84935051afb7278d0c1cdb08ac194757ee3b1"}, - {file = "types_setuptools-67.4.0.3-py3-none-any.whl", hash = "sha256:3c83c3a6363dd3ddcdd054796705605f0fa8b8e5a39390e07a05e5f7af054978"}, + {file = "types-setuptools-67.5.0.0.tar.gz", hash = "sha256:fa6f231eeb27e86b1d6e8260f73de300e91f99c205b9a5e21debd49f3726a849"}, + {file = "types_setuptools-67.5.0.0-py3-none-any.whl", hash = "sha256:f7f4bf4ab777e88631d3a387bbfdd4d480a2a4693ca896130f8ef738370377b8"}, ] [[package]] From 2a869d257f714714b282b0c45890652815f3bd2f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 10:14:05 +0000 Subject: [PATCH 282/344] Bump types-pillow from 9.4.0.13 to 9.4.0.17 (#15211) --- changelog.d/15211.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15211.misc diff --git a/changelog.d/15211.misc b/changelog.d/15211.misc new file mode 100644 index 000000000000..be7dfd813258 --- /dev/null +++ b/changelog.d/15211.misc @@ -0,0 +1 @@ +Bump types-pillow from 9.4.0.13 to 9.4.0.17. diff --git a/poetry.lock b/poetry.lock index 52b95460597e..de43ca7d3db7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2651,14 +2651,14 @@ files = [ [[package]] name = "types-pillow" -version = "9.4.0.13" +version = "9.4.0.17" description = "Typing stubs for Pillow" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-Pillow-9.4.0.13.tar.gz", hash = "sha256:4510aa98a28947bf63f2b29edebbd11b7cff8647d90b867cec9b3674c0a8c321"}, - {file = "types_Pillow-9.4.0.13-py3-none-any.whl", hash = "sha256:14a8a19021b8fe569a9fef9edc64a8d8a4aef340e38669d4fb3dc05cfd941130"}, + {file = "types-Pillow-9.4.0.17.tar.gz", hash = "sha256:7f0e871d2d46fbb6bc7deca3e02dc552cf9c1e8b49deb9595509551be3954e49"}, + {file = "types_Pillow-9.4.0.17-py3-none-any.whl", hash = "sha256:f8b848a05f17cb4d53d245c59bf560372b9778d4cfaf9705f6245009bf9f65f3"}, ] [[package]] From 869ef75cb7f7cd5b62dc1cde96287fe781d71589 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 7 Mar 2023 10:14:21 +0000 Subject: [PATCH 283/344] Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4 (#15213) --- changelog.d/15213.misc | 1 + poetry.lock | 48 ++++-------------------------------------- 2 files changed, 5 insertions(+), 44 deletions(-) create mode 100644 changelog.d/15213.misc diff --git a/changelog.d/15213.misc b/changelog.d/15213.misc new file mode 100644 index 000000000000..370aead769db --- /dev/null +++ b/changelog.d/15213.misc @@ -0,0 +1 @@ +Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. 
diff --git a/poetry.lock b/poetry.lock index de43ca7d3db7..24adc4c8760c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2585,46 +2585,6 @@ files = [ {file = "types_commonmark-0.9.2.2-py3-none-any.whl", hash = "sha256:d3d878692615e7fbe47bf19ba67497837b135812d665012a3d42219c1f2c3a61"}, ] -[[package]] -name = "types-cryptography" -version = "3.3.15" -description = "Typing stubs for cryptography" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "types-cryptography-3.3.15.tar.gz", hash = "sha256:a7983a75a7b88a18f88832008f0ef140b8d1097888ec1a0824ec8fb7e105273b"}, - {file = "types_cryptography-3.3.15-py3-none-any.whl", hash = "sha256:d9b0dd5465d7898d400850e7f35e5518aa93a7e23d3e11757cd81b4777089046"}, -] - -[package.dependencies] -types-enum34 = "*" -types-ipaddress = "*" - -[[package]] -name = "types-enum34" -version = "1.1.8" -description = "Typing stubs for enum34" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "types-enum34-1.1.8.tar.gz", hash = "sha256:6f9c769641d06d73a55e11c14d38ac76fcd37eb545ce79cebb6eec9d50a64110"}, - {file = "types_enum34-1.1.8-py3-none-any.whl", hash = "sha256:05058c7a495f6bfaaca0be4aeac3cce5cdd80a2bad2aab01fd49a20bf4a0209d"}, -] - -[[package]] -name = "types-ipaddress" -version = "1.0.8" -description = "Typing stubs for ipaddress" -category = "dev" -optional = false -python-versions = "*" -files = [ - {file = "types-ipaddress-1.0.8.tar.gz", hash = "sha256:a03df3be5935e50ba03fa843daabff539a041a28e73e0fce2c5705bee54d3841"}, - {file = "types_ipaddress-1.0.8-py3-none-any.whl", hash = "sha256:4933b74da157ba877b1a705d64f6fa7742745e9ffd65e51011f370c11ebedb55"}, -] - [[package]] name = "types-jsonschema" version = "4.17.0.5" @@ -2675,18 +2635,18 @@ files = [ [[package]] name = "types-pyopenssl" -version = "22.1.0.2" +version = "23.0.0.4" description = "Typing stubs for pyOpenSSL" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-pyOpenSSL-22.1.0.2.tar.gz", hash = "sha256:7a350e29e55bc3ee4571f996b4b1c18c4e4098947db45f7485b016eaa35b44bc"}, - {file = "types_pyOpenSSL-22.1.0.2-py3-none-any.whl", hash = "sha256:54606a6afb203eb261e0fca9b7f75fa6c24d5ff71e13903c162ffb951c2c64c6"}, + {file = "types-pyOpenSSL-23.0.0.4.tar.gz", hash = "sha256:8b3550b6e19d51ce78aabd724b0d8ebd962081a5fce95e7f85a592dfcdbc16bf"}, + {file = "types_pyOpenSSL-23.0.0.4-py3-none-any.whl", hash = "sha256:ad49e15bb8bb2f251b8fc24776f414d877629e44b1b049240063ab013b5a6a7d"}, ] [package.dependencies] -types-cryptography = "*" +cryptography = ">=35.0.0" [[package]] name = "types-pyyaml" From c0854ce65aafc479db1a404b1ba17fb0fa4339ad Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Mar 2023 11:51:18 +0000 Subject: [PATCH 284/344] Hack to rebuild the complement editable image (#15184) * Hack to rebuild the complement editable image * Changelog --- changelog.d/15184.misc | 1 + scripts-dev/complement.sh | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15184.misc diff --git a/changelog.d/15184.misc b/changelog.d/15184.misc new file mode 100644 index 000000000000..53dba1b6bacd --- /dev/null +++ b/changelog.d/15184.misc @@ -0,0 +1 @@ +Add an option to force a rebuild of the "editable" complement image. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index 66aaa3d8480d..1b1761202fdd 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -59,6 +59,11 @@ Run the complement test suite on Synapse. is important. 
Not suitable for use in CI in case the editable environment is impure. + --rebuild-editable + Force a rebuild of the editable build of Synapse. + This is occasionally useful if the built-in rebuild detection with + --editable fails, e.g. when changing configure_workers_and_start.py. + For help on arguments to 'go test', run 'go help testflag'. EOF } @@ -82,6 +87,9 @@ while [ $# -ge 1 ]; do "-e"|"--editable") use_editable_synapse=1 ;; + "--rebuild-editable") + rebuild_editable_synapse=1 + ;; *) # unknown arg: presumably an argument to gotest. break the loop. break @@ -116,7 +124,9 @@ if [ -n "$use_editable_synapse" ]; then fi editable_mount="$(realpath .):/editable-src:z" - if docker inspect complement-synapse-editable &>/dev/null; then + if [ -n "$rebuild_editable_synapse" ]; then + unset skip_docker_build + elif docker inspect complement-synapse-editable &>/dev/null; then # complement-synapse-editable already exists: see if we can still use it: # - The Rust module must still be importable; it will fail to import if the Rust source has changed. # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) From 506e24ffc4089df2d379adb747c27b70624f9427 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Mar 2023 12:11:15 +0000 Subject: [PATCH 285/344] 1.79.0rc1 --- CHANGES.md | 95 +++++++++++++++++++++++++++++++++++++++ changelog.d/14026.doc | 1 - changelog.d/14101.misc | 1 - changelog.d/14869.bugfix | 1 - changelog.d/14918.misc | 1 - changelog.d/15044.feature | 1 - changelog.d/15051.misc | 1 - changelog.d/15071.doc | 1 - changelog.d/15077.feature | 1 - changelog.d/15088.bugfix | 1 - changelog.d/15092.bugfix | 1 - changelog.d/15093.bugfix | 1 - changelog.d/15095.misc | 1 - changelog.d/15103.misc | 1 - changelog.d/15107.feature | 1 - changelog.d/15112.doc | 1 - changelog.d/15116.feature | 1 - changelog.d/15133.feature | 1 - changelog.d/15134.feature | 1 - changelog.d/15135.misc | 1 - changelog.d/15137.removal | 1 - changelog.d/15138.misc | 1 - changelog.d/15139.doc | 1 - changelog.d/15143.misc | 1 - changelog.d/15146.misc | 1 - changelog.d/15148.doc | 1 - changelog.d/15152.misc | 1 - changelog.d/15154.misc | 1 - changelog.d/15155.misc | 1 - changelog.d/15156.misc | 1 - changelog.d/15157.misc | 1 - changelog.d/15158.misc | 1 - changelog.d/15159.misc | 1 - changelog.d/15160.misc | 1 - changelog.d/15163.bugfix | 1 - changelog.d/15164.misc | 1 - changelog.d/15165.misc | 1 - changelog.d/15167.misc | 1 - changelog.d/15168.doc | 1 - changelog.d/15172.feature | 1 - changelog.d/15174.bugfix | 1 - changelog.d/15175.misc | 1 - changelog.d/15177.bugfix | 1 - changelog.d/15180.bugfix | 1 - changelog.d/15184.misc | 1 - changelog.d/15185.feature | 1 - changelog.d/15186.docker | 1 - changelog.d/15188.misc | 1 - changelog.d/15189.misc | 1 - changelog.d/15191.misc | 1 - changelog.d/15192.misc | 1 - changelog.d/15193.bugfix | 1 - changelog.d/15194.misc | 1 - changelog.d/15199.misc | 1 - changelog.d/15209.misc | 1 - changelog.d/15210.misc | 1 - changelog.d/15211.misc | 1 - changelog.d/15212.misc | 1 - changelog.d/15213.misc | 1 - changelog.d/15214.misc | 1 - changelog.d/15215.misc | 1 - debian/changelog | 6 +++ pyproject.toml | 2 +- 63 files changed, 102 insertions(+), 61 deletions(-) delete mode 100644 changelog.d/14026.doc delete mode 100644 changelog.d/14101.misc delete mode 100644 changelog.d/14869.bugfix delete mode 100644 changelog.d/14918.misc delete mode 100644 changelog.d/15044.feature delete mode 100644 changelog.d/15051.misc delete mode 100644 changelog.d/15071.doc 
delete mode 100644 changelog.d/15077.feature delete mode 100644 changelog.d/15088.bugfix delete mode 100644 changelog.d/15092.bugfix delete mode 100644 changelog.d/15093.bugfix delete mode 100644 changelog.d/15095.misc delete mode 100644 changelog.d/15103.misc delete mode 100644 changelog.d/15107.feature delete mode 100644 changelog.d/15112.doc delete mode 100644 changelog.d/15116.feature delete mode 100644 changelog.d/15133.feature delete mode 100644 changelog.d/15134.feature delete mode 100644 changelog.d/15135.misc delete mode 100644 changelog.d/15137.removal delete mode 100644 changelog.d/15138.misc delete mode 100644 changelog.d/15139.doc delete mode 100644 changelog.d/15143.misc delete mode 100644 changelog.d/15146.misc delete mode 100644 changelog.d/15148.doc delete mode 100644 changelog.d/15152.misc delete mode 100644 changelog.d/15154.misc delete mode 100644 changelog.d/15155.misc delete mode 100644 changelog.d/15156.misc delete mode 100644 changelog.d/15157.misc delete mode 100644 changelog.d/15158.misc delete mode 100644 changelog.d/15159.misc delete mode 100644 changelog.d/15160.misc delete mode 100644 changelog.d/15163.bugfix delete mode 100644 changelog.d/15164.misc delete mode 100644 changelog.d/15165.misc delete mode 100644 changelog.d/15167.misc delete mode 100644 changelog.d/15168.doc delete mode 100644 changelog.d/15172.feature delete mode 100644 changelog.d/15174.bugfix delete mode 100644 changelog.d/15175.misc delete mode 100644 changelog.d/15177.bugfix delete mode 100644 changelog.d/15180.bugfix delete mode 100644 changelog.d/15184.misc delete mode 100644 changelog.d/15185.feature delete mode 100644 changelog.d/15186.docker delete mode 100644 changelog.d/15188.misc delete mode 100644 changelog.d/15189.misc delete mode 100644 changelog.d/15191.misc delete mode 100644 changelog.d/15192.misc delete mode 100644 changelog.d/15193.bugfix delete mode 100644 changelog.d/15194.misc delete mode 100644 changelog.d/15199.misc delete mode 100644 changelog.d/15209.misc delete mode 100644 changelog.d/15210.misc delete mode 100644 changelog.d/15211.misc delete mode 100644 changelog.d/15212.misc delete mode 100644 changelog.d/15213.misc delete mode 100644 changelog.d/15214.misc delete mode 100644 changelog.d/15215.misc diff --git a/CHANGES.md b/CHANGES.md index 644ef6e036ac..fe076f2bce69 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,98 @@ +Synapse 1.79.0rc1 (2023-03-07) +============================== + +Features +-------- + +- Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). ([\#15044](https://github.com/matrix-org/synapse/issues/15044)) +- Experimental support for MSC3967 to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077)) +- Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#15107](https://github.com/matrix-org/synapse/issues/15107)) +- Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). 
([\#15116](https://github.com/matrix-org/synapse/issues/15116))
+- Add support for knocking to workers. ([\#15133](https://github.com/matrix-org/synapse/issues/15133))
+- Allow use of the `/filter` Client-Server APIs on workers. ([\#15134](https://github.com/matrix-org/synapse/issues/15134))
+- Remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172))
+- Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): `event_property_is` push condition. ([\#15185](https://github.com/matrix-org/synapse/issues/15185))
+
+
+Bugfixes
+--------
+
+- Fix a bug introduced in Synapse 1.75 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. ([\#14869](https://github.com/matrix-org/synapse/issues/14869))
+- Fix a long-standing bug where Synapse handled an unspecced field on push rules. ([\#15088](https://github.com/matrix-org/synapse/issues/15088))
+- Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. ([\#15092](https://github.com/matrix-org/synapse/issues/15092))
+- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
+- Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163))
+- Add the `transaction_id` in the events included in many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174))
+- Fix `test_icu_word_boundary_punctuation` for Alpine / macOS installed ICU versions. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))
+- Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. ([\#15180](https://github.com/matrix-org/synapse/issues/15180))
+- Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). ([\#15193](https://github.com/matrix-org/synapse/issues/15193))
+- Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. ([\#15143](https://github.com/matrix-org/synapse/issues/15143))
+
+
+Updates to the Docker image
+---------------------------
+
+- Improve startup logging in the with-workers Docker image. ([\#15186](https://github.com/matrix-org/synapse/issues/15186))
+
+
+Improved Documentation
+----------------------
+
+- Document how to use caches in a module. ([\#14026](https://github.com/matrix-org/synapse/issues/14026))
+- Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. ([\#15071](https://github.com/matrix-org/synapse/issues/15071))
+- Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. ([\#15112](https://github.com/matrix-org/synapse/issues/15112))
+- Correct reference to `federation_verify_certificates` in configuration documentation. ([\#15139](https://github.com/matrix-org/synapse/issues/15139))
+- Correct small documentation errors in some `MatrixFederationHttpClient` methods.
([\#15148](https://github.com/matrix-org/synapse/issues/15148)) +- Correct the description of the behavior of `registration_shared_secret_path` on startup. ([\#15168](https://github.com/matrix-org/synapse/issues/15168)) + + +Deprecations and Removals +------------------------- + +- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137)) + + +Internal Changes +---------------- + +- Run the integration test suites with the asyncio reactor enabled in CI. ([\#14101](https://github.com/matrix-org/synapse/issues/14101)) +- Batch up storing state groups when creating a new room. ([\#14918](https://github.com/matrix-org/synapse/issues/14918)) +- Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. ([\#15051](https://github.com/matrix-org/synapse/issues/15051)) +- Refactor writing json data in `FileExfiltrationWriter`. ([\#15095](https://github.com/matrix-org/synapse/issues/15095)) +- Tighten the login ratelimit defaults. ([\#15135](https://github.com/matrix-org/synapse/issues/15135)) +- Fix a typo in an experimental config setting. ([\#15138](https://github.com/matrix-org/synapse/issues/15138)) +- Refactor the media modules. ([\#15146](https://github.com/matrix-org/synapse/issues/15146), [\#15175](https://github.com/matrix-org/synapse/issues/15175)) +- Improve type hints. ([\#15164](https://github.com/matrix-org/synapse/issues/15164)) +- Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. ([\#15165](https://github.com/matrix-org/synapse/issues/15165)) +- Remove dangling reference to being a reference implementation in docstring. ([\#15167](https://github.com/matrix-org/synapse/issues/15167)) +- Add an option to force a rebuild of the "editable" complement image. ([\#15184](https://github.com/matrix-org/synapse/issues/15184)) +- Use nightly rustfmt in CI. ([\#15188](https://github.com/matrix-org/synapse/issues/15188)) +- Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. ([\#15191](https://github.com/matrix-org/synapse/issues/15191)) +- Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. ([\#15192](https://github.com/matrix-org/synapse/issues/15192)) +- Automatically fix errors with `ruff`. ([\#15194](https://github.com/matrix-org/synapse/issues/15194)) +- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189)) +- Remove unspecced and buggy `PUT` method on the unstable `/rooms//batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199)) +- Refactor database transaction for query users' devices to reduce database pool contention. ([\#15215](https://github.com/matrix-org/synapse/issues/15215)) +- Correct `test_icu_word_boundary_punctuation` so that it passes with the ICU versions available in Alpine and macOS. ([\#15177](https://github.com/matrix-org/synapse/issues/15177)) +-
Locked dependency updates + - Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155)) + - Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103)) + - Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152)) + - Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154)) + - Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156)) + - Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159)) + - Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214)) + - Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209)) + - Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158)) + - Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211)) + - Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210)) + - Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213)) + - Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160)) + - Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212)) + - Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157)) +
+ + Synapse 1.78.0 (2023-02-28) =========================== diff --git a/changelog.d/14026.doc b/changelog.d/14026.doc deleted file mode 100644 index 28fc5568ea0b..000000000000 --- a/changelog.d/14026.doc +++ /dev/null @@ -1 +0,0 @@ -Document how to use caches in a module. diff --git a/changelog.d/14101.misc b/changelog.d/14101.misc deleted file mode 100644 index c48f40cd3876..000000000000 --- a/changelog.d/14101.misc +++ /dev/null @@ -1 +0,0 @@ -Run the integration test suites with the asyncio reactor enabled in CI. diff --git a/changelog.d/14869.bugfix b/changelog.d/14869.bugfix deleted file mode 100644 index 865b597741bb..000000000000 --- a/changelog.d/14869.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in v1.75.0rc1 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. \ No newline at end of file diff --git a/changelog.d/14918.misc b/changelog.d/14918.misc deleted file mode 100644 index 828794354acd..000000000000 --- a/changelog.d/14918.misc +++ /dev/null @@ -1 +0,0 @@ -Batch up storing state groups when creating a new room. \ No newline at end of file diff --git a/changelog.d/15044.feature b/changelog.d/15044.feature deleted file mode 100644 index 91e5cda8c3a9..000000000000 --- a/changelog.d/15044.feature +++ /dev/null @@ -1 +0,0 @@ -Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). \ No newline at end of file diff --git a/changelog.d/15051.misc b/changelog.d/15051.misc deleted file mode 100644 index fabfe77d35af..000000000000 --- a/changelog.d/15051.misc +++ /dev/null @@ -1 +0,0 @@ -Update [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952) support based on changes to the MSC. diff --git a/changelog.d/15071.doc b/changelog.d/15071.doc deleted file mode 100644 index 7fbaba3e8cf0..000000000000 --- a/changelog.d/15071.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify which worker processes the ThirdPartyRules' [`on_new_event`](https://matrix-org.github.io/synapse/v1.78/modules/third_party_rules_callbacks.html#on_new_event) module API callback runs on. \ No newline at end of file diff --git a/changelog.d/15077.feature b/changelog.d/15077.feature deleted file mode 100644 index 384e751056b7..000000000000 --- a/changelog.d/15077.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for MSC3967 to not require UIA for setting up cross-signing on first use. diff --git a/changelog.d/15088.bugfix b/changelog.d/15088.bugfix deleted file mode 100644 index 15d5286f80f7..000000000000 --- a/changelog.d/15088.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where Synapse handled an unspecced field on push rules. diff --git a/changelog.d/15092.bugfix b/changelog.d/15092.bugfix deleted file mode 100644 index 67509c5c692a..000000000000 --- a/changelog.d/15092.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. 
diff --git a/changelog.d/15093.bugfix b/changelog.d/15093.bugfix deleted file mode 100644 index 00f1c1939171..000000000000 --- a/changelog.d/15093.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. diff --git a/changelog.d/15095.misc b/changelog.d/15095.misc deleted file mode 100644 index a2fafe2fffd8..000000000000 --- a/changelog.d/15095.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor writing json data in `FileExfiltrationWriter`. \ No newline at end of file diff --git a/changelog.d/15103.misc b/changelog.d/15103.misc deleted file mode 100644 index 65322498c90e..000000000000 --- a/changelog.d/15103.misc +++ /dev/null @@ -1 +0,0 @@ -Bump black from 22.12.0 to 23.1.0. diff --git a/changelog.d/15107.feature b/changelog.d/15107.feature deleted file mode 100644 index 2bdb6a29fceb..000000000000 --- a/changelog.d/15107.feature +++ /dev/null @@ -1 +0,0 @@ -Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). \ No newline at end of file diff --git a/changelog.d/15112.doc b/changelog.d/15112.doc deleted file mode 100644 index 7dec43a50b33..000000000000 --- a/changelog.d/15112.doc +++ /dev/null @@ -1 +0,0 @@ -Document using [Shibboleth](https://www.shibboleth.net/) as an OpenID Provider. diff --git a/changelog.d/15116.feature b/changelog.d/15116.feature deleted file mode 100644 index 087d8dc7f18e..000000000000 --- a/changelog.d/15116.feature +++ /dev/null @@ -1 +0,0 @@ -Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). \ No newline at end of file diff --git a/changelog.d/15133.feature b/changelog.d/15133.feature deleted file mode 100644 index e0af0d455440..000000000000 --- a/changelog.d/15133.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for knocking to workers. \ No newline at end of file diff --git a/changelog.d/15134.feature b/changelog.d/15134.feature deleted file mode 100644 index 0dbb30bc8f13..000000000000 --- a/changelog.d/15134.feature +++ /dev/null @@ -1 +0,0 @@ -Allow use of the `/filter` Client-Server APIs on workers. \ No newline at end of file diff --git a/changelog.d/15135.misc b/changelog.d/15135.misc deleted file mode 100644 index 25c4dbffe134..000000000000 --- a/changelog.d/15135.misc +++ /dev/null @@ -1 +0,0 @@ -Tighten the login ratelimit defaults. diff --git a/changelog.d/15137.removal b/changelog.d/15137.removal deleted file mode 100644 index c533b0c9dd19..000000000000 --- a/changelog.d/15137.removal +++ /dev/null @@ -1 +0,0 @@ -Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. diff --git a/changelog.d/15138.misc b/changelog.d/15138.misc deleted file mode 100644 index fb706b27f265..000000000000 --- a/changelog.d/15138.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a typo in an experimental config setting. diff --git a/changelog.d/15139.doc b/changelog.d/15139.doc deleted file mode 100644 index d8ab48b272dd..000000000000 --- a/changelog.d/15139.doc +++ /dev/null @@ -1 +0,0 @@ -Correct reference to `federation_verify_certificates` in configuration documentation. 
diff --git a/changelog.d/15143.misc b/changelog.d/15143.misc deleted file mode 100644 index cff4518811ec..000000000000 --- a/changelog.d/15143.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. diff --git a/changelog.d/15146.misc b/changelog.d/15146.misc deleted file mode 100644 index 8de5f952398f..000000000000 --- a/changelog.d/15146.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the media modules. diff --git a/changelog.d/15148.doc b/changelog.d/15148.doc deleted file mode 100644 index 4e9e16330651..000000000000 --- a/changelog.d/15148.doc +++ /dev/null @@ -1 +0,0 @@ -Correct small documentation errors in some `MatrixFederationHttpClient` methods. \ No newline at end of file diff --git a/changelog.d/15152.misc b/changelog.d/15152.misc deleted file mode 100644 index 6b2c73d0ab31..000000000000 --- a/changelog.d/15152.misc +++ /dev/null @@ -1 +0,0 @@ -Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. diff --git a/changelog.d/15154.misc b/changelog.d/15154.misc deleted file mode 100644 index c958b520780a..000000000000 --- a/changelog.d/15154.misc +++ /dev/null @@ -1 +0,0 @@ -Bump docker/login-action from 1 to 2. diff --git a/changelog.d/15155.misc b/changelog.d/15155.misc deleted file mode 100644 index 40c73e96ec33..000000000000 --- a/changelog.d/15155.misc +++ /dev/null @@ -1 +0,0 @@ -Bump actions/checkout from 2 to 3. diff --git a/changelog.d/15156.misc b/changelog.d/15156.misc deleted file mode 100644 index ebae4cb45698..000000000000 --- a/changelog.d/15156.misc +++ /dev/null @@ -1 +0,0 @@ -Bump matrix-org/backend-meta from 1 to 2. diff --git a/changelog.d/15157.misc b/changelog.d/15157.misc deleted file mode 100644 index 730b706dfef4..000000000000 --- a/changelog.d/15157.misc +++ /dev/null @@ -1 +0,0 @@ -Bump typing-extensions from 4.4.0 to 4.5.0. diff --git a/changelog.d/15158.misc b/changelog.d/15158.misc deleted file mode 100644 index fc0eecfd2145..000000000000 --- a/changelog.d/15158.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-opentracing from 2.4.10.1 to 2.4.10.3. diff --git a/changelog.d/15159.misc b/changelog.d/15159.misc deleted file mode 100644 index ebb857a89ef5..000000000000 --- a/changelog.d/15159.misc +++ /dev/null @@ -1 +0,0 @@ -Bump ruff from 0.0.237 to 0.0.252. diff --git a/changelog.d/15160.misc b/changelog.d/15160.misc deleted file mode 100644 index 13b098d17c51..000000000000 --- a/changelog.d/15160.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 67.3.0.1 to 67.4.0.3. diff --git a/changelog.d/15163.bugfix b/changelog.d/15163.bugfix deleted file mode 100644 index 7ff1cd4463cd..000000000000 --- a/changelog.d/15163.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. \ No newline at end of file diff --git a/changelog.d/15164.misc b/changelog.d/15164.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15164.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15165.misc b/changelog.d/15165.misc deleted file mode 100644 index a75be84dac7f..000000000000 --- a/changelog.d/15165.misc +++ /dev/null @@ -1 +0,0 @@ -Move `get_event_report` and `get_event_reports_paginate` from `RoomStore` to `RoomWorkerStore`. 
\ No newline at end of file diff --git a/changelog.d/15167.misc b/changelog.d/15167.misc deleted file mode 100644 index 175c2a3b8385..000000000000 --- a/changelog.d/15167.misc +++ /dev/null @@ -1 +0,0 @@ -Remove dangling reference to being a reference implementation in docstring. diff --git a/changelog.d/15168.doc b/changelog.d/15168.doc deleted file mode 100644 index dbd3c54714e4..000000000000 --- a/changelog.d/15168.doc +++ /dev/null @@ -1 +0,0 @@ -Correct the description of the behavior of `registration_shared_secret_path` on startup. diff --git a/changelog.d/15172.feature b/changelog.d/15172.feature deleted file mode 100644 index 3f789edb7f25..000000000000 --- a/changelog.d/15172.feature +++ /dev/null @@ -1 +0,0 @@ -Remove support for server-side aggregation of reactions. diff --git a/changelog.d/15174.bugfix b/changelog.d/15174.bugfix deleted file mode 100644 index a0c70cbe2252..000000000000 --- a/changelog.d/15174.bugfix +++ /dev/null @@ -1 +0,0 @@ -Add the `transaction_id` in the events included in many endpoints responses. diff --git a/changelog.d/15175.misc b/changelog.d/15175.misc deleted file mode 100644 index 8de5f952398f..000000000000 --- a/changelog.d/15175.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor the media modules. diff --git a/changelog.d/15177.bugfix b/changelog.d/15177.bugfix deleted file mode 100644 index b9764947eb2e..000000000000 --- a/changelog.d/15177.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix test_icu_word_boundary_punctuation for alpine / macos installed ICU versions. diff --git a/changelog.d/15180.bugfix b/changelog.d/15180.bugfix deleted file mode 100644 index e7a3dcd41a37..000000000000 --- a/changelog.d/15180.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. diff --git a/changelog.d/15184.misc b/changelog.d/15184.misc deleted file mode 100644 index 53dba1b6bacd..000000000000 --- a/changelog.d/15184.misc +++ /dev/null @@ -1 +0,0 @@ -Add an option to force a rebuild of the "editable" complement image. diff --git a/changelog.d/15185.feature b/changelog.d/15185.feature deleted file mode 100644 index 901900bdecf7..000000000000 --- a/changelog.d/15185.feature +++ /dev/null @@ -1 +0,0 @@ -Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): `event_property_is` push condition. diff --git a/changelog.d/15186.docker b/changelog.d/15186.docker deleted file mode 100644 index 5e436ff7e2a9..000000000000 --- a/changelog.d/15186.docker +++ /dev/null @@ -1 +0,0 @@ -Improve startup logging in the with-workers Docker image. diff --git a/changelog.d/15188.misc b/changelog.d/15188.misc deleted file mode 100644 index e4e9472f0108..000000000000 --- a/changelog.d/15188.misc +++ /dev/null @@ -1 +0,0 @@ -Use nightly rustfmt in CI. diff --git a/changelog.d/15189.misc b/changelog.d/15189.misc deleted file mode 100644 index ded2feb79e4a..000000000000 --- a/changelog.d/15189.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. diff --git a/changelog.d/15191.misc b/changelog.d/15191.misc deleted file mode 100644 index 579f76d451ff..000000000000 --- a/changelog.d/15191.misc +++ /dev/null @@ -1 +0,0 @@ -Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. 
\ No newline at end of file diff --git a/changelog.d/15192.misc b/changelog.d/15192.misc deleted file mode 100644 index 107668687583..000000000000 --- a/changelog.d/15192.misc +++ /dev/null @@ -1 +0,0 @@ -Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. diff --git a/changelog.d/15193.bugfix b/changelog.d/15193.bugfix deleted file mode 100644 index ca781e9631ec..000000000000 --- a/changelog.d/15193.bugfix +++ /dev/null @@ -1 +0,0 @@ -Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). diff --git a/changelog.d/15194.misc b/changelog.d/15194.misc deleted file mode 100644 index 931bf5448a9b..000000000000 --- a/changelog.d/15194.misc +++ /dev/null @@ -1 +0,0 @@ -Automatically fix errors with `ruff`. diff --git a/changelog.d/15199.misc b/changelog.d/15199.misc deleted file mode 100644 index 145b03fe1651..000000000000 --- a/changelog.d/15199.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unspecced and buggy `PUT` method on the unstable `/rooms//batch_send` endpoint. diff --git a/changelog.d/15209.misc b/changelog.d/15209.misc deleted file mode 100644 index cb361353a8b1..000000000000 --- a/changelog.d/15209.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-commonmark from 0.9.2.1 to 0.9.2.2. diff --git a/changelog.d/15210.misc b/changelog.d/15210.misc deleted file mode 100644 index e1f64b6d6332..000000000000 --- a/changelog.d/15210.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. diff --git a/changelog.d/15211.misc b/changelog.d/15211.misc deleted file mode 100644 index be7dfd813258..000000000000 --- a/changelog.d/15211.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.4.0.13 to 9.4.0.17. diff --git a/changelog.d/15212.misc b/changelog.d/15212.misc deleted file mode 100644 index 36c593f67d5c..000000000000 --- a/changelog.d/15212.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-setuptools from 67.4.0.3 to 67.5.0.0. diff --git a/changelog.d/15213.misc b/changelog.d/15213.misc deleted file mode 100644 index 370aead769db..000000000000 --- a/changelog.d/15213.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. diff --git a/changelog.d/15214.misc b/changelog.d/15214.misc deleted file mode 100644 index 91a8cb9d7288..000000000000 --- a/changelog.d/15214.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde_json from 1.0.93 to 1.0.94. diff --git a/changelog.d/15215.misc b/changelog.d/15215.misc deleted file mode 100644 index fe52a56a7e5e..000000000000 --- a/changelog.d/15215.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database transaction for query users' devices to reduce database pool contention. diff --git a/debian/changelog b/debian/changelog index 0f094308c1f6..871c695f07c4 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium + + * New Synapse release 1.79.0rc1. + + -- Synapse Packaging team Tue, 07 Mar 2023 12:03:49 +0000 + matrix-synapse-py3 (1.78.0) stable; urgency=medium * New Synapse release 1.78.0. 
diff --git a/pyproject.toml b/pyproject.toml index 27785b6e1361..90a118741641 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.78.0" +version = "1.79.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 8314646cd3563d9aaaf8028c9e58989b4ed980ba Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Mar 2023 13:30:47 +0000 Subject: [PATCH 286/344] Update changelog --- CHANGES.md | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index fe076f2bce69..25567d9ad797 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -5,12 +5,12 @@ Features -------- - Add two new Third Party Rules module API callbacks: [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier) and [`on_remove_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_remove_user_third_party_identifier). ([\#15044](https://github.com/matrix-org/synapse/issues/15044)) -- Experimental support for MSC3967 to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077)) +- Experimental support for [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967) to not require UIA for setting up cross-signing on first use. ([\#15077](https://github.com/matrix-org/synapse/issues/15077)) - Add media information to the command line [user data export tool](https://matrix-org.github.io/synapse/v1.79/usage/administration/admin_faq.html#how-can-i-export-user-data). ([\#15107](https://github.com/matrix-org/synapse/issues/15107)) - Add an [admin API](https://matrix-org.github.io/synapse/latest/usage/administration/admin_api/index.html) to delete a [specific event report](https://spec.matrix.org/v1.6/client-server-api/#reporting-content). ([\#15116](https://github.com/matrix-org/synapse/issues/15116)) - Add support for knocking to workers. ([\#15133](https://github.com/matrix-org/synapse/issues/15133)) - Allow use of the `/filter` Client-Server APIs on workers. ([\#15134](https://github.com/matrix-org/synapse/issues/15134)) -- Remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172)) +- Update support for [MSC2677](https://github.com/matrix-org/matrix-spec-proposals/pull/2677): remove support for server-side aggregation of reactions. ([\#15172](https://github.com/matrix-org/synapse/issues/15172)) - Stabilise support for [MSC3758](https://github.com/matrix-org/matrix-spec-proposals/pull/3758): `event_property_is` push condition. ([\#15185](https://github.com/matrix-org/synapse/issues/15185)) @@ -20,7 +20,6 @@ Bugfixes - Fix a bug introduced in Synapse 1.75 that caused experimental support for deleting account data to raise an internal server error while using an account data writer worker. ([\#14869](https://github.com/matrix-org/synapse/issues/14869)) - Fix a long-standing bug where Synapse handled an unspecced field on push rules. ([\#15088](https://github.com/matrix-org/synapse/issues/15088)) - Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. 
([\#15092](https://github.com/matrix-org/synapse/issues/15092))
-- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
- Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163))
- Add the `transaction_id` in the events included in many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174))
- Fix `test_icu_word_boundary_punctuation` for Alpine / macOS installed ICU versions. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))
@@ -49,7 +48,11 @@ Improved Documentation
Deprecations and Removals
-------------------------
+- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044]
+- Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093))
+- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
- Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137))
+- Remove unspecced and buggy `PUT` method on the unstable `/rooms//batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199))
Internal Changes
----------------
@@ -70,26 +73,26 @@ Internal Changes
- Add a `get_next_txn` method to `StreamIdGenerator` to match `MultiWriterIdGenerator`. ([\#15191](https://github.com/matrix-org/synapse/issues/15191))
- Combine `AbstractStreamIdTracker` and `AbstractStreamIdGenerator`. ([\#15192](https://github.com/matrix-org/synapse/issues/15192))
- Automatically fix errors with `ruff`. ([\#15194](https://github.com/matrix-org/synapse/issues/15194))
-- Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. ([\#15189](https://github.com/matrix-org/synapse/issues/15189))
-- Remove unspecced and buggy `PUT` method on the unstable `/rooms//batch_send` endpoint. ([\#15199](https://github.com/matrix-org/synapse/issues/15199))
- Refactor database transaction for query users' devices to reduce database pool contention. ([\#15215](https://github.com/matrix-org/synapse/issues/15215))
- Correct `test_icu_word_boundary_punctuation` so that it passes with the ICU versions available in Alpine and macOS. ([\#15177](https://github.com/matrix-org/synapse/issues/15177))
--
Locked dependency updates - - Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155)) - - Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103)) - - Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152)) - - Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154)) - - Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156)) - - Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159)) - - Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214)) - - Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209)) - - Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158)) - - Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211)) - - Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210)) - - Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213)) - - Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160)) - - Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212)) - - Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157)) + +
Locked dependency updates + + - Bump actions/checkout from 2 to 3. ([\#15155](https://github.com/matrix-org/synapse/issues/15155)) + - Bump black from 22.12.0 to 23.1.0. ([\#15103](https://github.com/matrix-org/synapse/issues/15103)) + - Bump dawidd6/action-download-artifact from 2.25.0 to 2.26.0. ([\#15152](https://github.com/matrix-org/synapse/issues/15152)) + - Bump docker/login-action from 1 to 2. ([\#15154](https://github.com/matrix-org/synapse/issues/15154)) + - Bump matrix-org/backend-meta from 1 to 2. ([\#15156](https://github.com/matrix-org/synapse/issues/15156)) + - Bump ruff from 0.0.237 to 0.0.252. ([\#15159](https://github.com/matrix-org/synapse/issues/15159)) + - Bump serde_json from 1.0.93 to 1.0.94. ([\#15214](https://github.com/matrix-org/synapse/issues/15214)) + - Bump types-commonmark from 0.9.2.1 to 0.9.2.2. ([\#15209](https://github.com/matrix-org/synapse/issues/15209)) + - Bump types-opentracing from 2.4.10.1 to 2.4.10.3. ([\#15158](https://github.com/matrix-org/synapse/issues/15158)) + - Bump types-pillow from 9.4.0.13 to 9.4.0.17. ([\#15211](https://github.com/matrix-org/synapse/issues/15211)) + - Bump types-psycopg2 from 2.9.21.4 to 2.9.21.8. ([\#15210](https://github.com/matrix-org/synapse/issues/15210)) + - Bump types-pyopenssl from 22.1.0.2 to 23.0.0.4. ([\#15213](https://github.com/matrix-org/synapse/issues/15213)) + - Bump types-setuptools from 67.3.0.1 to 67.4.0.3. ([\#15160](https://github.com/matrix-org/synapse/issues/15160)) + - Bump types-setuptools from 67.4.0.3 to 67.5.0.0. ([\#15212](https://github.com/matrix-org/synapse/issues/15212)) + - Bump typing-extensions from 4.4.0 to 4.5.0. ([\#15157](https://github.com/matrix-org/synapse/issues/15157))
From 2af1a982c17c599409192969c3aeb547f0998ee6 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Mar 2023 13:34:06 +0000 Subject: [PATCH 287/344] Remove duplicate entry from changelog --- CHANGES.md | 1 - 1 file changed, 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 25567d9ad797..212ebe2f36e7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -22,7 +22,6 @@ Bugfixes - Fix a long-standing bug where a URL preview would break if the discovered oEmbed failed to download. ([\#15092](https://github.com/matrix-org/synapse/issues/15092)) - Fix a long-standing bug where an initial sync would not respond to changes to the list of ignored users if there was an initial sync cached. ([\#15163](https://github.com/matrix-org/synapse/issues/15163)) - Add the `transaction_id` in the events included in many endpoints' responses. ([\#15174](https://github.com/matrix-org/synapse/issues/15174)) -- Fix `test_icu_word_boundary_punctuation` for Alpine acos installed ICU versions. ([\#15177](https://github.com/matrix-org/synapse/issues/15177)) - Fix a bug introduced in Synapse 1.78.0 where requests to claim dehydrated devices would fail with a `405` error. ([\#15180](https://github.com/matrix-org/synapse/issues/15180)) - Stop applying edits when bundling aggregations, per [MSC3925](https://github.com/matrix-org/matrix-spec-proposals/pull/3925). ([\#15193](https://github.com/matrix-org/synapse/issues/15193)) - Fix a long-standing bug where the user directory search was not case-insensitive for accented characters. ([\#15143](https://github.com/matrix-org/synapse/issues/15143)) From 820f02b70badfc04d35c95f8ffb9682c8310e91e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Mar 2023 10:06:02 -0500 Subject: [PATCH 288/344] Stabilize support for MSC3966: event_property_contains push condition. (#15187) This removes the configuration flag & updates the identifiers to use the stable version. --- changelog.d/15187.feature | 1 + rust/benches/evaluator.rs | 4 ---- rust/src/push/evaluator.rs | 22 +++++---------------- rust/src/push/mod.rs | 8 ++------ stubs/synapse/synapse_rust/push.pyi | 1 - synapse/config/experimental.py | 10 ++-------- synapse/push/bulk_push_rule_evaluator.py | 1 - tests/push/test_bulk_push_rule_evaluator.py | 18 ++--------------- tests/push/test_push_rule_evaluator.py | 3 +-- 9 files changed, 13 insertions(+), 55 deletions(-) create mode 100644 changelog.d/15187.feature diff --git a/changelog.d/15187.feature b/changelog.d/15187.feature new file mode 100644 index 000000000000..f2b768925521 --- /dev/null +++ b/changelog.d/15187.feature @@ -0,0 +1 @@ +Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. 
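For orientation: once stabilised, a push rule refers to the condition kind directly, with no org.matrix.msc3966 prefix. The sketch below mirrors the condition exercised by the updated `tests/push/test_push_rule_evaluator.py` later in this patch; the key and value are just that test's fixture values.

    # Stable MSC3966 condition shape, as used in the updated test below.
    # "content.value" and "foobaz" are illustrative fixture values.
    condition = {
        "kind": "event_property_contains",
        "key": "content.value",
        "value": "foobaz",
    }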
diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 79b553dbb0d3..64e13f6486e8 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -52,7 +52,6 @@ fn bench_match_exact(b: &mut Bencher) { true, vec![], false, - false, ) .unwrap(); @@ -98,7 +97,6 @@ fn bench_match_word(b: &mut Bencher) { true, vec![], false, - false, ) .unwrap(); @@ -144,7 +142,6 @@ fn bench_match_word_miss(b: &mut Bencher) { true, vec![], false, - false, ) .unwrap(); @@ -190,7 +187,6 @@ fn bench_eval_message(b: &mut Bencher) { true, vec![], false, - false, ) .unwrap(); diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 67fe6a4823fe..6941c61ea486 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -96,9 +96,6 @@ pub struct PushRuleEvaluator { /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same /// flag as MSC1767 (extensible events core). msc3931_enabled: bool, - - /// If MSC3966 (exact_event_property_contains push rule condition) is enabled. - msc3966_exact_event_property_contains: bool, } #[pymethods] @@ -116,7 +113,6 @@ impl PushRuleEvaluator { related_event_match_enabled: bool, room_version_feature_flags: Vec, msc3931_enabled: bool, - msc3966_exact_event_property_contains: bool, ) -> Result { let body = match flattened_keys.get("content.body") { Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone(), @@ -134,7 +130,6 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, - msc3966_exact_event_property_contains, }) } @@ -301,8 +296,8 @@ impl PushRuleEvaluator { Some(Cow::Borrowed(pattern)), )? } - KnownCondition::ExactEventPropertyContains(event_property_is) => self - .match_exact_event_property_contains( + KnownCondition::EventPropertyContains(event_property_is) => self + .match_event_property_contains( event_property_is.key.clone(), event_property_is.value.clone(), )?, @@ -321,7 +316,7 @@ impl PushRuleEvaluator { EventMatchPatternType::UserLocalpart => get_localpart_from_id(user_id)?, }; - self.match_exact_event_property_contains( + self.match_event_property_contains( exact_event_match.key.clone(), Cow::Borrowed(&SimpleJsonValue::Str(pattern.to_string())), )? @@ -454,17 +449,12 @@ impl PushRuleEvaluator { } } - /// Evaluates a `exact_event_property_contains` condition. (MSC3966) - fn match_exact_event_property_contains( + /// Evaluates a `event_property_contains` condition. + fn match_event_property_contains( &self, key: Cow, value: Cow, ) -> Result { - // First check if the feature is enabled. - if !self.msc3966_exact_event_property_contains { - return Ok(false); - } - let haystack = if let Some(JsonValue::Array(haystack)) = self.flattened_keys.get(&*key) { haystack } else { @@ -515,7 +505,6 @@ fn push_rule_evaluator() { true, vec![], true, - true, ) .unwrap(); @@ -545,7 +534,6 @@ fn test_requires_room_version_supports_condition() { false, flags, true, - true, ) .unwrap(); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 7fde88e8256a..575a1c1e682d 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -337,13 +337,9 @@ pub enum KnownCondition { // Identical to related_event_match but gives predefined patterns. Cannot be added by users. 
#[serde(skip_deserializing, rename = "im.nheko.msc3664.related_event_match")] RelatedEventMatchType(RelatedEventMatchTypeCondition), - #[serde(rename = "org.matrix.msc3966.exact_event_property_contains")] - ExactEventPropertyContains(EventPropertyIsCondition), + EventPropertyContains(EventPropertyIsCondition), // Identical to exact_event_property_contains but gives predefined patterns. Cannot be added by users. - #[serde( - skip_deserializing, - rename = "org.matrix.msc3966.exact_event_property_contains" - )] + #[serde(skip_deserializing, rename = "event_property_contains")] ExactEventPropertyContainsType(EventPropertyIsTypeCondition), ContainsDisplayName, RoomMemberCount { diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi index c040944aac55..5d0ce4b1a4bd 100644 --- a/stubs/synapse/synapse_rust/push.pyi +++ b/stubs/synapse/synapse_rust/push.pyi @@ -65,7 +65,6 @@ class PushRuleEvaluator: related_event_match_enabled: bool, room_version_feature_flags: Tuple[str, ...], msc3931_enabled: bool, - msc3966_exact_event_property_contains: bool, ): ... def run( self, diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 489f2601acfa..9ff382ccc3d0 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -171,15 +171,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "msc3873_escape_event_match_key", False ) - # MSC3966: exact_event_property_contains push rule condition. - self.msc3966_exact_event_property_contains = experimental.get( - "msc3966_exact_event_property_contains", False - ) - # MSC3952: Intentional mentions, this depends on MSC3966. - self.msc3952_intentional_mentions = ( - experimental.get("msc3952_intentional_mentions", False) - and self.msc3966_exact_event_property_contains + self.msc3952_intentional_mentions = experimental.get( + "msc3952_intentional_mentions", False ) # MSC3959: Do not generate notifications for edits. 
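A practical consequence of the `experimental.py` change above: intentional mentions no longer depend on a separate MSC3966 flag. A minimal sketch of the resulting configuration, matching the `override_config` used in the updated tests further down:

    # Sketch only: enabling intentional mentions now needs just this flag;
    # msc3966_exact_event_property_contains has been removed entirely.
    config = {
        "experimental_features": {
            "msc3952_intentional_mentions": True,
        },
    }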
diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index ba12b6d79a18..45622a9e9b2a 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -413,7 +413,6 @@ async def _action_for_event_by_user( self._related_event_match_enabled, event.room_version.msc3931_push_features, self.hs.config.experimental.msc1767_enabled, # MSC3931 flag - self.hs.config.experimental.msc3966_exact_event_property_contains, ) users = rules_by_user.keys() diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index c6591c50de76..46df0102f77a 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -228,14 +228,7 @@ def _create_and_process( ) return len(result) > 0 - @override_config( - { - "experimental_features": { - "msc3952_intentional_mentions": True, - "msc3966_exact_event_property_contains": True, - } - } - ) + @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) def test_user_mentions(self) -> None: """Test the behavior of an event which includes invalid user mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) @@ -331,14 +324,7 @@ def test_user_mentions(self) -> None: ) ) - @override_config( - { - "experimental_features": { - "msc3952_intentional_mentions": True, - "msc3966_exact_event_property_contains": True, - } - } - ) + @override_config({"experimental_features": {"msc3952_intentional_mentions": True}}) def test_room_mentions(self) -> None: """Test the behavior of an event which includes invalid room mentions.""" bulk_evaluator = BulkPushRuleEvaluator(self.hs) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index ff5a9a66f5fe..6deee0fd0275 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -173,7 +173,6 @@ def _get_evaluator( related_event_match_enabled=True, room_version_feature_flags=event.room_version.msc3931_push_features, msc3931_enabled=True, - msc3966_exact_event_property_contains=True, ) def test_display_name(self) -> None: @@ -526,7 +525,7 @@ def test_exact_event_property_contains(self) -> None: """Check that exact_event_property_contains conditions work as expected.""" condition = { - "kind": "org.matrix.msc3966.exact_event_property_contains", + "kind": "event_property_contains", "key": "content.value", "value": "foobaz", } From 47bc84dd53b81523d9400af462dcff68185b22fd Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 7 Mar 2023 17:05:22 +0100 Subject: [PATCH 289/344] Pass the Requester down to the HttpTransactionCache. (#15200) --- changelog.d/15200.misc | 1 + synapse/rest/admin/server_notice_servlet.py | 34 ++-- synapse/rest/client/room.py | 174 ++++++++++++-------- synapse/rest/client/sendtodevice.py | 25 ++- synapse/rest/client/transactions.py | 55 +++---- tests/rest/client/test_transactions.py | 55 +++++-- 6 files changed, 215 insertions(+), 129 deletions(-) create mode 100644 changelog.d/15200.misc diff --git a/changelog.d/15200.misc b/changelog.d/15200.misc new file mode 100644 index 000000000000..dc6617222650 --- /dev/null +++ b/changelog.d/15200.misc @@ -0,0 +1 @@ +Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key. 
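For orientation, the core of this patch is how the idempotency key is derived. The sketch below restates the `_get_transaction_key` logic added to `synapse/rest/client/transactions.py` in the hunks that follow; it is a summary, not additional code to apply.

    # Summary sketch: the key pairs the request path with a requester-specific
    # attribute rather than the raw access token string.
    def _get_transaction_key(request, requester):
        path = request.path.decode("utf8")
        if requester.is_guest:
            return (path, "guest", requester.user)
        elif requester.app_service is not None:
            return (path, "appservice", requester.app_service.id)
        else:
            return (path, "user", requester.access_token_id)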
diff --git a/synapse/rest/admin/server_notice_servlet.py b/synapse/rest/admin/server_notice_servlet.py index 15da9cd88153..7dd1c10b91b4 100644 --- a/synapse/rest/admin/server_notice_servlet.py +++ b/synapse/rest/admin/server_notice_servlet.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. from http import HTTPStatus -from typing import TYPE_CHECKING, Awaitable, Optional, Tuple +from typing import TYPE_CHECKING, Optional, Tuple from synapse.api.constants import EventTypes from synapse.api.errors import NotFoundError, SynapseError @@ -23,10 +23,10 @@ parse_json_object_from_request, ) from synapse.http.site import SynapseRequest -from synapse.rest.admin import assert_requester_is_admin -from synapse.rest.admin._base import admin_patterns +from synapse.logging.opentracing import set_tag +from synapse.rest.admin._base import admin_patterns, assert_user_is_admin from synapse.rest.client.transactions import HttpTransactionCache -from synapse.types import JsonDict, UserID +from synapse.types import JsonDict, Requester, UserID if TYPE_CHECKING: from synapse.server import HomeServer @@ -70,10 +70,13 @@ def register(self, json_resource: HttpServer) -> None: self.__class__.__name__, ) - async def on_POST( - self, request: SynapseRequest, txn_id: Optional[str] = None + async def _do( + self, + request: SynapseRequest, + requester: Requester, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - await assert_requester_is_admin(self.auth, request) + await assert_user_is_admin(self.auth, requester) body = parse_json_object_from_request(request) assert_params_in_dict(body, ("user_id", "content")) event_type = body.get("type", EventTypes.Message) @@ -106,9 +109,18 @@ async def on_POST( return HTTPStatus.OK, {"event_id": event.event_id} - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + return await self._do(request, requester, None) + + async def on_PUT( self, request: SynapseRequest, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, txn_id + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + set_tag("txn_id", txn_id) + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester, txn_id ) diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 61e4cf0213a3..129b6fe6b07d 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -57,7 +57,7 @@ from synapse.rest.client._base import client_patterns from synapse.rest.client.transactions import HttpTransactionCache from synapse.streams.config import PaginationConfig -from synapse.types import JsonDict, StreamToken, ThirdPartyInstanceID, UserID +from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID from synapse.types.state import StateFilter from synapse.util import json_decoder from synapse.util.cancellation import cancellable @@ -151,15 +151,22 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = "/createRoom" register_txn_path(self, PATTERNS, http_server) - def on_PUT( + async def on_PUT( self, request: SynapseRequest, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request(request, self.on_POST, 
request) + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester + ) async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) + return await self._do(request, requester) + async def _do( + self, request: SynapseRequest, requester: Requester + ) -> Tuple[int, JsonDict]: room_id, _, _ = await self._room_creation_handler.create_room( requester, self.get_room_config(request) ) @@ -172,9 +179,9 @@ def get_room_config(self, request: Request) -> JsonDict: # TODO: Needs unit testing for generic events -class RoomStateEventRestServlet(TransactionRestServlet): +class RoomStateEventRestServlet(RestServlet): def __init__(self, hs: "HomeServer"): - super().__init__(hs) + super().__init__() self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self.message_handler = hs.get_message_handler() @@ -324,16 +331,16 @@ def __init__(self, hs: "HomeServer"): def register(self, http_server: HttpServer) -> None: # /rooms/$roomid/send/$event_type[/$txn_id] PATTERNS = "/rooms/(?P[^/]*)/send/(?P[^/]*)" - register_txn_path(self, PATTERNS, http_server, with_get=True) + register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, request: SynapseRequest, + requester: Requester, room_id: str, event_type: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) content = parse_json_object_from_request(request) event_dict: JsonDict = { @@ -362,18 +369,30 @@ async def on_POST( set_tag("event_id", event_id) return 200, {"event_id": event_id} - def on_GET( - self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str - ) -> Tuple[int, str]: - return 200, "Not implemented" + async def on_POST( + self, + request: SynapseRequest, + room_id: str, + event_type: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) + return await self._do(request, requester, room_id, event_type, None) - def on_PUT( + async def on_PUT( self, request: SynapseRequest, room_id: str, event_type: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, event_type, txn_id + return await self.txns.fetch_or_execute_request( + request, + requester, + self._do, + request, + requester, + room_id, + event_type, + txn_id, ) @@ -389,14 +408,13 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = "/join/(?P[^/]*)" register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, request: SynapseRequest, + requester: Requester, room_identifier: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - content = parse_json_object_from_request(request, allow_empty_body=True) # twisted.web.server.Request.args is incorrectly defined as Optional[Any] @@ -420,22 +438,31 @@ async def on_POST( return 200, {"room_id": room_id} - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + room_identifier: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) + return await 
self._do(request, requester, room_identifier, None) + + async def on_PUT( self, request: SynapseRequest, room_identifier: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_identifier, txn_id + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester, room_identifier, txn_id ) # TODO: Needs unit testing -class PublicRoomListRestServlet(TransactionRestServlet): +class PublicRoomListRestServlet(RestServlet): PATTERNS = client_patterns("/publicRooms$", v1=True) def __init__(self, hs: "HomeServer"): - super().__init__(hs) + super().__init__() self.hs = hs self.auth = hs.get_auth() @@ -907,22 +934,25 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = "/rooms/(?P[^/]*)/forget" register_txn_path(self, PATTERNS, http_server) - async def on_POST( - self, request: SynapseRequest, room_id: str, txn_id: Optional[str] = None - ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=False) - + async def _do(self, requester: Requester, room_id: str) -> Tuple[int, JsonDict]: await self.room_member_handler.forget(user=requester.user, room_id=room_id) return 200, {} - def on_PUT( + async def on_POST( + self, request: SynapseRequest, room_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=False) + return await self._do(requester, room_id) + + async def on_PUT( self, request: SynapseRequest, room_id: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=False) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, txn_id + return await self.txns.fetch_or_execute_request( + request, requester, self._do, requester, room_id ) @@ -941,15 +971,14 @@ def register(self, http_server: HttpServer) -> None: ) register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, request: SynapseRequest, + requester: Requester, room_id: str, membership_action: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - if requester.is_guest and membership_action not in { Membership.JOIN, Membership.LEAVE, @@ -1014,13 +1043,30 @@ async def on_POST( return 200, return_value - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + room_id: str, + membership_action: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) + return await self._do(request, requester, room_id, membership_action, None) + + async def on_PUT( self, request: SynapseRequest, room_id: str, membership_action: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, membership_action, txn_id + return await self.txns.fetch_or_execute_request( + request, + requester, + self._do, + request, + requester, + room_id, + membership_action, + txn_id, ) @@ -1036,14 +1082,14 @@ def register(self, http_server: HttpServer) -> None: PATTERNS = 
"/rooms/(?P[^/]*)/redact/(?P[^/]*)" register_txn_path(self, PATTERNS, http_server) - async def on_POST( + async def _do( self, request: SynapseRequest, + requester: Requester, room_id: str, event_id: str, - txn_id: Optional[str] = None, + txn_id: Optional[str], ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request) content = parse_json_object_from_request(request) try: @@ -1094,13 +1140,23 @@ async def on_POST( set_tag("event_id", event_id) return 200, {"event_id": event_id} - def on_PUT( + async def on_POST( + self, + request: SynapseRequest, + room_id: str, + event_id: str, + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + return await self._do(request, requester, room_id, event_id, None) + + async def on_PUT( self, request: SynapseRequest, room_id: str, event_id: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self.on_POST, request, room_id, event_id, txn_id + return await self.txns.fetch_or_execute_request( + request, requester, self._do, request, requester, room_id, event_id, txn_id ) @@ -1224,7 +1280,6 @@ def register_txn_path( servlet: RestServlet, regex_string: str, http_server: HttpServer, - with_get: bool = False, ) -> None: """Registers a transaction-based path. @@ -1236,7 +1291,6 @@ def register_txn_path( regex_string: The regex string to register. Must NOT have a trailing $ as this string will be appended to. http_server: The http_server to register paths with. - with_get: True to also register respective GET paths for the PUTs. """ on_POST = getattr(servlet, "on_POST", None) on_PUT = getattr(servlet, "on_PUT", None) @@ -1254,18 +1308,6 @@ def register_txn_path( on_PUT, servlet.__class__.__name__, ) - on_GET = getattr(servlet, "on_GET", None) - if with_get: - if on_GET is None: - raise RuntimeError( - "register_txn_path called with with_get = True, but no on_GET method exists" - ) - http_server.register_paths( - "GET", - client_patterns(regex_string + "/(?P[^/]*)$", v1=True), - on_GET, - servlet.__class__.__name__, - ) class TimestampLookupRestServlet(RestServlet): diff --git a/synapse/rest/client/sendtodevice.py b/synapse/rest/client/sendtodevice.py index 55d52f0b2830..110af6df473f 100644 --- a/synapse/rest/client/sendtodevice.py +++ b/synapse/rest/client/sendtodevice.py @@ -13,7 +13,7 @@ # limitations under the License. 
import logging -from typing import TYPE_CHECKING, Awaitable, Tuple +from typing import TYPE_CHECKING, Tuple from synapse.http import servlet from synapse.http.server import HttpServer @@ -21,7 +21,7 @@ from synapse.http.site import SynapseRequest from synapse.logging.opentracing import set_tag from synapse.rest.client.transactions import HttpTransactionCache -from synapse.types import JsonDict +from synapse.types import JsonDict, Requester from ._base import client_patterns @@ -43,19 +43,26 @@ def __init__(self, hs: "HomeServer"): self.txns = HttpTransactionCache(hs) self.device_message_handler = hs.get_device_message_handler() - def on_PUT( + async def on_PUT( self, request: SynapseRequest, message_type: str, txn_id: str - ) -> Awaitable[Tuple[int, JsonDict]]: + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request, allow_guest=True) set_tag("txn_id", txn_id) - return self.txns.fetch_or_execute_request( - request, self._put, request, message_type, txn_id + return await self.txns.fetch_or_execute_request( + request, + requester, + self._put, + request, + requester, + message_type, ) async def _put( - self, request: SynapseRequest, message_type: str, txn_id: str + self, + request: SynapseRequest, + requester: Requester, + message_type: str, ) -> Tuple[int, JsonDict]: - requester = await self.auth.get_user_by_req(request, allow_guest=True) - content = parse_json_object_from_request(request) assert_params_in_dict(content, ("messages",)) diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 3f40f1874a8b..f2aaab622740 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -15,16 +15,16 @@ """This module contains logic for storing HTTP PUT transactions. This is used to ensure idempotency when performing PUTs using the REST API.""" import logging -from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Tuple +from typing import TYPE_CHECKING, Awaitable, Callable, Dict, Hashable, Tuple from typing_extensions import ParamSpec from twisted.internet.defer import Deferred from twisted.python.failure import Failure -from twisted.web.server import Request +from twisted.web.iweb import IRequest from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.types import JsonDict +from synapse.types import JsonDict, Requester from synapse.util.async_helpers import ObservableDeferred if TYPE_CHECKING: @@ -41,53 +41,47 @@ class HttpTransactionCache: def __init__(self, hs: "HomeServer"): self.hs = hs - self.auth = self.hs.get_auth() self.clock = self.hs.get_clock() # $txn_key: (ObservableDeferred<(res_code, res_json_body)>, timestamp) self.transactions: Dict[ - str, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int] + Hashable, Tuple[ObservableDeferred[Tuple[int, JsonDict]], int] ] = {} # Try to clean entries every 30 mins. This means entries will exist # for at *LEAST* 30 mins, and at *MOST* 60 mins. self.cleaner = self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) - def _get_transaction_key(self, request: Request) -> str: + def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used with TransactionCache for idempotent requests. Idempotency is based on the returned key being the same for separate requests to the same endpoint. The key is formed from the HTTP request - path and the access_token for the requesting user. 
+ path and attributes from the requester: the access_token_id for regular users, + the user ID for guest users, and the appservice ID for appservice users. Args: - request: The incoming request. Must contain an access_token. + request: The incoming request. + requester: The requester doing the request. Returns: A transaction key """ assert request.path is not None - token = self.auth.get_access_token_from_request(request) - return request.path.decode("utf8") + "/" + token + path: str = request.path.decode("utf8") + if requester.is_guest: + assert requester.user is not None, "Guest requester must have a user ID set" + return (path, "guest", requester.user) + elif requester.app_service is not None: + return (path, "appservice", requester.app_service.id) + else: + assert ( + requester.access_token_id is not None + ), "Requester must have an access_token_id" + return (path, "user", requester.access_token_id) def fetch_or_execute_request( self, - request: Request, - fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], - *args: P.args, - **kwargs: P.kwargs, - ) -> Awaitable[Tuple[int, JsonDict]]: - """A helper function for fetch_or_execute which extracts - a transaction key from the given request. - - See: - fetch_or_execute - """ - return self.fetch_or_execute( - self._get_transaction_key(request), fn, *args, **kwargs - ) - - def fetch_or_execute( - self, - txn_key: str, + request: IRequest, + requester: Requester, fn: Callable[P, Awaitable[Tuple[int, JsonDict]]], *args: P.args, **kwargs: P.kwargs, @@ -96,14 +90,15 @@ def fetch_or_execute( to produce a response for this transaction. Args: - txn_key: A key to ensure idempotency should fetch_or_execute be - called again at a later point in time. + request: + requester: fn: A function which returns a tuple of (response_code, response_dict). *args: Arguments to pass to fn. **kwargs: Keyword arguments to pass to fn. Returns: Deferred which resolves to a tuple of (response_code, response_dict). """ + txn_key = self._get_transaction_key(request, requester) if txn_key in self.transactions: observable = self.transactions[txn_key][0] else: diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 3086e1b5650b..d8dc56261ac1 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -39,15 +39,23 @@ def setUp(self) -> None: self.cache = HttpTransactionCache(self.hs) self.mock_http_response = (HTTPStatus.OK, {"result": "GOOD JOB!"}) - self.mock_key = "foo" + + # Here we make sure that we're setting all the fields that HttpTransactionCache + # uses to build the transaction key. 
+ self.mock_request = Mock() + self.mock_request.path = b"/foo/bar" + self.mock_requester = Mock() + self.mock_requester.app_service = None + self.mock_requester.is_guest = False + self.mock_requester.access_token_id = 1234 @defer.inlineCallbacks def test_executes_given_function( self, ) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) - res = yield self.cache.fetch_or_execute( - self.mock_key, cb, "some_arg", keyword="arg" + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb, "some_arg", keyword="arg" ) cb.assert_called_once_with("some_arg", keyword="arg") self.assertEqual(res, self.mock_http_response) @@ -58,8 +66,13 @@ def test_deduplicates_based_on_key( ) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) for i in range(3): # invoke multiple times - res = yield self.cache.fetch_or_execute( - self.mock_key, cb, "some_arg", keyword="arg", changing_args=i + res = yield self.cache.fetch_or_execute_request( + self.mock_request, + self.mock_requester, + cb, + "some_arg", + keyword="arg", + changing_args=i, ) self.assertEqual(res, self.mock_http_response) # expect only a single call to do the work @@ -77,7 +90,9 @@ def cb() -> Generator["defer.Deferred[object]", object, Tuple[int, JsonDict]]: @defer.inlineCallbacks def test() -> Generator["defer.Deferred[Any]", object, None]: with LoggingContext("c") as c1: - res = yield self.cache.fetch_or_execute(self.mock_key, cb) + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) self.assertIs(current_context(), c1) self.assertEqual(res, (1, {})) @@ -106,12 +121,16 @@ def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": with LoggingContext("test") as test_context: try: - yield self.cache.fetch_or_execute(self.mock_key, cb) + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) except Exception as e: self.assertEqual(e.args[0], "boo") self.assertIs(current_context(), test_context) - res = yield self.cache.fetch_or_execute(self.mock_key, cb) + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) self.assertEqual(res, self.mock_http_response) self.assertIs(current_context(), test_context) @@ -134,29 +153,39 @@ def cb() -> "defer.Deferred[Tuple[int, JsonDict]]": with LoggingContext("test") as test_context: try: - yield self.cache.fetch_or_execute(self.mock_key, cb) + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) except Exception as e: self.assertEqual(e.args[0], "boo") self.assertIs(current_context(), test_context) - res = yield self.cache.fetch_or_execute(self.mock_key, cb) + res = yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb + ) self.assertEqual(res, self.mock_http_response) self.assertIs(current_context(), test_context) @defer.inlineCallbacks def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: cb = Mock(return_value=make_awaitable(self.mock_http_response)) - yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb, "an arg" + ) # should NOT have cleaned up yet self.clock.advance_time_msec(CLEANUP_PERIOD_MS / 2) - yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") + yield self.cache.fetch_or_execute_request( + self.mock_request, 
self.mock_requester, cb, "an arg" + ) # still using cache cb.assert_called_once_with("an arg") self.clock.advance_time_msec(CLEANUP_PERIOD_MS) - yield self.cache.fetch_or_execute(self.mock_key, cb, "an arg") + yield self.cache.fetch_or_execute_request( + self.mock_request, self.mock_requester, cb, "an arg" + ) # no longer using cache self.assertEqual(cb.call_count, 2) self.assertEqual(cb.call_args_list, [call("an arg"), call("an arg")]) From 20ed8c926b518809e67e4d1696189413e851d2e4 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 7 Mar 2023 11:27:57 -0500 Subject: [PATCH 290/344] Stabilize support for MSC3873: disambuguated event push keys. (#15190) This removes the experimental configuration option and always escapes the push rule condition keys. Also escapes any (experimental) push rule condition keys in the base rules which contain dot in a field name. --- changelog.d/15190.bugfix | 1 + rust/src/push/base_rules.rs | 6 ++--- synapse/config/experimental.py | 10 ------- synapse/push/bulk_push_rule_evaluator.py | 33 ++++++------------------ tests/push/test_push_rule_evaluator.py | 10 +++---- 5 files changed, 15 insertions(+), 45 deletions(-) create mode 100644 changelog.d/15190.bugfix diff --git a/changelog.d/15190.bugfix b/changelog.d/15190.bugfix new file mode 100644 index 000000000000..5c3a86320ec8 --- /dev/null +++ b/changelog.d/15190.bugfix @@ -0,0 +1 @@ +Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index ec8d96656ace..d7c73c1f25c1 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -71,7 +71,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( EventMatchCondition { - key: Cow::Borrowed("content.m.relates_to.rel_type"), + key: Cow::Borrowed("content.m\\.relates_to.rel_type"), pattern: Cow::Borrowed("m.replace"), }, ))]), @@ -146,7 +146,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ priority_class: 5, conditions: Cow::Borrowed(&[Condition::Known( KnownCondition::ExactEventPropertyContainsType(EventPropertyIsTypeCondition { - key: Cow::Borrowed("content.org.matrix.msc3952.mentions.user_ids"), + key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.user_ids"), value_type: Cow::Borrowed(&EventMatchPatternType::UserId), }), )]), @@ -167,7 +167,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ priority_class: 5, conditions: Cow::Borrowed(&[ Condition::Known(KnownCondition::EventPropertyIs(EventPropertyIsCondition { - key: Cow::Borrowed("content.org.matrix.msc3952.mentions.room"), + key: Cow::Borrowed("content.org\\.matrix\\.msc3952\\.mentions.room"), value: Cow::Borrowed(&SimpleJsonValue::Bool(true)), })), Condition::Known(KnownCondition::SenderNotificationPermission { diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 9ff382ccc3d0..7e05f78f70a2 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -166,11 +166,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3391: Removing account data. self.msc3391_enabled = experimental.get("msc3391_enabled", False) - # MSC3873: Disambiguate event_match keys. 
- self.msc3873_escape_event_match_key = experimental.get( - "msc3873_escape_event_match_key", False - ) - # MSC3952: Intentional mentions, this depends on MSC3966. self.msc3952_intentional_mentions = experimental.get( "msc3952_intentional_mentions", False @@ -181,10 +176,5 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "msc3958_supress_edit_notifs", False ) - # MSC3966: exact_event_property_contains push rule condition. - self.msc3966_exact_event_property_contains = experimental.get( - "msc3966_exact_event_property_contains", False - ) - # MSC3967: Do not require UIA when first uploading cross signing keys self.msc3967_enabled = experimental.get("msc3967_enabled", False) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 45622a9e9b2a..199337673fa2 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -273,10 +273,7 @@ async def _related_events( related_event_id, allow_none=True ) if related_event is not None: - related_events[relation_type] = _flatten_dict( - related_event, - msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key, - ) + related_events[relation_type] = _flatten_dict(related_event) reply_event_id = ( event.content.get("m.relates_to", {}) @@ -291,10 +288,7 @@ async def _related_events( ) if related_event is not None: - related_events["m.in_reply_to"] = _flatten_dict( - related_event, - msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key, - ) + related_events["m.in_reply_to"] = _flatten_dict(related_event) # indicate that this is from a fallback relation. if relation_type == "m.thread" and event.content.get( @@ -401,10 +395,7 @@ async def _action_for_event_by_user( ) evaluator = PushRuleEvaluator( - _flatten_dict( - event, - msc3873_escape_event_match_key=self.hs.config.experimental.msc3873_escape_event_match_key, - ), + _flatten_dict(event), has_mentions, room_member_count, sender_power_level, @@ -494,8 +485,6 @@ def _flatten_dict( d: Union[EventBase, Mapping[str, Any]], prefix: Optional[List[str]] = None, result: Optional[Dict[str, JsonValue]] = None, - *, - msc3873_escape_event_match_key: bool = False, ) -> Dict[str, JsonValue]: """ Given a JSON dictionary (or event) which might contain sub dictionaries, @@ -524,11 +513,10 @@ def _flatten_dict( if result is None: result = {} for key, value in d.items(): - if msc3873_escape_event_match_key: - # Escape periods in the key with a backslash (and backslashes with an - # extra backslash). This is since a period is used as a separator between - # nested fields. - key = key.replace("\\", "\\\\").replace(".", "\\.") + # Escape periods in the key with a backslash (and backslashes with an + # extra backslash). This is since a period is used as a separator between + # nested fields. 
+ key = key.replace("\\", "\\\\").replace(".", "\\.") if _is_simple_value(value): result[".".join(prefix + [key])] = value @@ -536,12 +524,7 @@ def _flatten_dict( result[".".join(prefix + [key])] = [v for v in value if _is_simple_value(v)] elif isinstance(value, Mapping): # do not set `room_version` due to recursion considerations below - _flatten_dict( - value, - prefix=(prefix + [key]), - result=result, - msc3873_escape_event_match_key=msc3873_escape_event_match_key, - ) + _flatten_dict(value, prefix=(prefix + [key]), result=result) # `room_version` should only ever be set when looking at the top level of an event if ( diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 6deee0fd0275..52c4aafea652 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -51,11 +51,7 @@ def test_nested(self) -> None: # If a field has a dot in it, escape it. input = {"m.foo": {"b\\ar": "abc"}} - self.assertEqual({"m.foo.b\\ar": "abc"}, _flatten_dict(input)) - self.assertEqual( - {"m\\.foo.b\\\\ar": "abc"}, - _flatten_dict(input, msc3873_escape_event_match_key=True), - ) + self.assertEqual({"m\\.foo.b\\\\ar": "abc"}, _flatten_dict(input)) def test_non_string(self) -> None: """String, booleans, ints, nulls and list of those should be kept while other items are dropped.""" @@ -125,7 +121,7 @@ def test_extensible_events(self) -> None: "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", - "content.org.matrix.msc1767.markup": [], + "content.org\\.matrix\\.msc1767\\.markup": [], } self.assertEqual(expected, _flatten_dict(event)) @@ -137,7 +133,7 @@ def test_extensible_events(self) -> None: "room_id": "!test:test", "sender": "@alice:test", "type": "m.room.message", - "content.org.matrix.msc1767.markup": [], + "content.org\\.matrix\\.msc1767\\.markup": [], } self.assertEqual(expected, _flatten_dict(event)) From 9418344db4534e31173f6ce51bd24c6224c9144b Mon Sep 17 00:00:00 2001 From: David Robertson Date: Tue, 7 Mar 2023 18:14:51 +0000 Subject: [PATCH 291/344] Fix typo in changelog --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 212ebe2f36e7..2357be33fb25 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -47,7 +47,7 @@ Improved Documentation Deprecations and Removals ------------------------- -- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044] +- Deprecate the `on_threepid_bind` module callback, to be replaced by [`on_add_user_third_party_identifier`](https://matrix-org.github.io/synapse/v1.79/modules/third_party_rules_callbacks.html#on_add_user_third_party_identifier). See [upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.79/docs/upgrade.md#upgrading-to-v1790). ([\#15044](https://github.com/matrix-org/synapse/issues/15044)) - Remove the unspecced `room_alias` field from the [`/createRoom`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3createroom) response. ([\#15093](https://github.com/matrix-org/synapse/issues/15093)) - Remove the unspecced `PUT` on the `/knock/{roomIdOrAlias}` endpoint. 
([\#15189](https://github.com/matrix-org/synapse/issues/15189)) - Remove the undocumented and unspecced `type` parameter to the `/thumbnail` endpoint. ([\#15137](https://github.com/matrix-org/synapse/issues/15137)) From a368d30c1cfe7457fca4fcdd03ae481ba65a226c Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 7 Mar 2023 13:54:39 -0800 Subject: [PATCH 292/344] More speedups/fixes to creating batched events (#15195) --- changelog.d/15195.misc | 1 + synapse/event_auth.py | 23 +++++++++++++++++------ synapse/events/snapshot.py | 1 + synapse/handlers/event_auth.py | 13 +++++++++++-- synapse/handlers/room.py | 4 +++- 5 files changed, 33 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15195.misc diff --git a/changelog.d/15195.misc b/changelog.d/15195.misc new file mode 100644 index 000000000000..d8beea917d8b --- /dev/null +++ b/changelog.d/15195.misc @@ -0,0 +1 @@ +Improve performance of creating and authenticating events. \ No newline at end of file diff --git a/synapse/event_auth.py b/synapse/event_auth.py index 4d6d1b8ebd58..af55874b5c47 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -168,13 +168,24 @@ async def check_state_independent_auth_rules( return # 2. Reject if event has auth_events that: ... - auth_events = await store.get_events( - event.auth_event_ids(), - redact_behaviour=EventRedactBehaviour.as_is, - allow_rejected=True, - ) if batched_auth_events: - auth_events.update(batched_auth_events) + # Copy the batched auth events to avoid mutating them. + auth_events = dict(batched_auth_events) + needed_auth_event_ids = set(event.auth_event_ids()) - batched_auth_events.keys() + if needed_auth_event_ids: + auth_events.update( + await store.get_events( + needed_auth_event_ids, + redact_behaviour=EventRedactBehaviour.as_is, + allow_rejected=True, + ) + ) + else: + auth_events = await store.get_events( + event.auth_event_ids(), + redact_behaviour=EventRedactBehaviour.as_is, + allow_rejected=True, + ) room_id = event.room_id auth_dict: MutableStateMap[str] = {} diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index a91a5d1e3cbe..c04ad08cbb61 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -293,6 +293,7 @@ async def get_prev_state_ids( Maps a (type, state_key) to the event ID of the state event matching this tuple. """ + assert self.state_group_before_event is not None return await self._storage.state.get_state_ids_for_group( self.state_group_before_event, state_filter diff --git a/synapse/handlers/event_auth.py b/synapse/handlers/event_auth.py index c508861b6a7b..0db0bd7304ce 100644 --- a/synapse/handlers/event_auth.py +++ b/synapse/handlers/event_auth.py @@ -63,9 +63,18 @@ async def check_auth_rules_from_context( self._store, event, batched_auth_events ) auth_event_ids = event.auth_event_ids() - auth_events_by_id = await self._store.get_events(auth_event_ids) + if batched_auth_events: - auth_events_by_id.update(batched_auth_events) + # Copy the batched auth events to avoid mutating them. 
+ auth_events_by_id = dict(batched_auth_events) + needed_auth_event_ids = set(auth_event_ids) - set(batched_auth_events) + if needed_auth_event_ids: + auth_events_by_id.update( + await self._store.get_events(needed_auth_event_ids) + ) + else: + auth_events_by_id = await self._store.get_events(auth_event_ids) + check_state_dependent_auth_rules(event, auth_events_by_id.values()) def compute_auth_events( diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index b1784638f4ba..32451670f326 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -1123,7 +1123,9 @@ async def create_event( event_dict, prev_event_ids=prev_event, depth=depth, - state_map=state_map, + # Take a copy to ensure each event gets a unique copy of + # state_map since it is modified below. + state_map=dict(state_map), for_batch=for_batch, ) From f4fc83ac755b8b06ecab1a31592308b03f8d2a5e Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 8 Mar 2023 07:51:34 -0500 Subject: [PATCH 293/344] Add a missing endpoint to the workers documentation. (#15223) --- changelog.d/15223.doc | 1 + docs/workers.md | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/15223.doc diff --git a/changelog.d/15223.doc b/changelog.d/15223.doc new file mode 100644 index 000000000000..136b44df31a2 --- /dev/null +++ b/changelog.d/15223.doc @@ -0,0 +1 @@ +Add a missing endpoint to the workers documentation. diff --git a/docs/workers.md b/docs/workers.md index fa536cd31081..e0e99b44539e 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -231,6 +231,7 @@ information. ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/ ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$ ^/_matrix/client/v1/rooms/.*/timestamp_to_event$ + ^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases ^/_matrix/client/(api/v1|r0|v3|unstable)/search$ ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$) From 88efc75bab2849b7b1cee52770dea3cf9925b2e8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 8 Mar 2023 15:08:56 -0500 Subject: [PATCH 294/344] Include the room ID in more purge room log lines. (#15222) --- changelog.d/15222.misc | 1 + synapse/handlers/pagination.py | 2 +- synapse/storage/controllers/purge_events.py | 22 +++++++++++-------- .../storage/databases/main/purge_events.py | 11 +++++----- synapse/storage/databases/state/store.py | 2 ++ 5 files changed, 23 insertions(+), 15 deletions(-) create mode 100644 changelog.d/15222.misc diff --git a/changelog.d/15222.misc b/changelog.d/15222.misc new file mode 100644 index 000000000000..6361676a1537 --- /dev/null +++ b/changelog.d/15222.misc @@ -0,0 +1 @@ +Improve log lines when purging rooms. 
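The mechanism here is a nested logging context keyed on the room ID, so every subsequent "[purge] ..." line is attributed to the room without repeating it in each message. A condensed sketch of the controller change that follows (adapted to a standalone function for brevity):

    from synapse.logging.context import nested_logging_context

    async def purge_room(stores, room_id: str) -> None:
        # Everything logged inside this block is tagged with the room ID.
        with nested_logging_context(room_id):
            state_groups_to_delete = await stores.main.purge_room(room_id)
            await stores.state.purge_room_state(room_id, state_groups_to_delete)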
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 8c79c055ba3e..63b35c8d621f 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -683,7 +683,7 @@ async def _shutdown_and_purge_room( await self._storage_controllers.purge_events.purge_room(room_id) - logger.info("complete") + logger.info("purge complete for room_id %s", room_id) self._delete_by_id[delete_id].status = DeleteStatus.STATUS_COMPLETE except Exception: f = Failure() diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py index 9ca50d6a0982..c599397b86ff 100644 --- a/synapse/storage/controllers/purge_events.py +++ b/synapse/storage/controllers/purge_events.py @@ -16,6 +16,7 @@ import logging from typing import TYPE_CHECKING, Set +from synapse.logging.context import nested_logging_context from synapse.storage.databases import Databases if TYPE_CHECKING: @@ -33,8 +34,9 @@ def __init__(self, hs: "HomeServer", stores: Databases): async def purge_room(self, room_id: str) -> None: """Deletes all record of a room""" - state_groups_to_delete = await self.stores.main.purge_room(room_id) - await self.stores.state.purge_room_state(room_id, state_groups_to_delete) + with nested_logging_context(room_id): + state_groups_to_delete = await self.stores.main.purge_room(room_id) + await self.stores.state.purge_room_state(room_id, state_groups_to_delete) async def purge_history( self, room_id: str, token: str, delete_local_events: bool @@ -51,15 +53,17 @@ async def purge_history( (instead of just marking them as outliers and deleting their state groups). """ - state_groups = await self.stores.main.purge_history( - room_id, token, delete_local_events - ) - - logger.info("[purge] finding state groups that can be deleted") + with nested_logging_context(room_id): + state_groups = await self.stores.main.purge_history( + room_id, token, delete_local_events + ) - sg_to_delete = await self._find_unreferenced_groups(state_groups) + logger.info("[purge] finding state groups that can be deleted") + sg_to_delete = await self._find_unreferenced_groups(state_groups) - await self.stores.state.purge_unreferenced_state_groups(room_id, sg_to_delete) + await self.stores.state.purge_unreferenced_state_groups( + room_id, sg_to_delete + ) async def _find_unreferenced_groups(self, state_groups: Set[int]) -> Set[int]: """Used when purging history to figure out which state groups can be diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index 9c41d01e1317..7a7c0d9c753d 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -325,6 +325,7 @@ async def purge_room(self, room_id: str) -> List[int]: # We then run the same purge a second time without this isolation level to # purge any of those rows which were added during the first. 
+ logger.info("[purge] Starting initial main purge of [1/2]") state_groups_to_delete = await self.db_pool.runInteraction( "purge_room", self._purge_room_txn, @@ -332,6 +333,7 @@ async def purge_room(self, room_id: str) -> List[int]: isolation_level=IsolationLevel.READ_COMMITTED, ) + logger.info("[purge] Starting secondary main purge of [2/2]") state_groups_to_delete.extend( await self.db_pool.runInteraction( "purge_room", @@ -339,6 +341,7 @@ async def purge_room(self, room_id: str) -> List[int]: room_id=room_id, ), ) + logger.info("[purge] Done with main purge") return state_groups_to_delete @@ -376,7 +379,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: ) referenced_chain_id_tuples = list(txn) - logger.info("[purge] removing events from event_auth_chain_links") + logger.info("[purge] removing from event_auth_chain_links") txn.executemany( """ DELETE FROM event_auth_chain_links WHERE @@ -399,7 +402,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: "rejections", "state_events", ): - logger.info("[purge] removing %s from %s", room_id, table) + logger.info("[purge] removing from %s", table) txn.execute( """ @@ -454,7 +457,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: # happy "rooms", ): - logger.info("[purge] removing %s from %s", room_id, table) + logger.info("[purge] removing from %s", table) txn.execute("DELETE FROM %s WHERE room_id=?" % (table,), (room_id,)) # Other tables we do NOT need to clear out: @@ -486,6 +489,4 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: # that already exist. self._invalidate_cache_and_stream(txn, self.have_seen_event, (room_id,)) - logger.info("[purge] done") - return state_groups diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index bf4cdfdf2917..29ff64e8764a 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -805,12 +805,14 @@ async def purge_room_state( state_groups_to_delete: State groups to delete """ + logger.info("[purge] Starting state purge") await self.db_pool.runInteraction( "purge_room_state", self._purge_room_state_txn, room_id, state_groups_to_delete, ) + logger.info("[purge] Done with state purge") def _purge_room_state_txn( self, From be4ea209e8bf92fb3660807c1fe8ad3d7d05621f Mon Sep 17 00:00:00 2001 From: Shay Date: Wed, 8 Mar 2023 19:27:20 -0800 Subject: [PATCH 295/344] Add topic and name events to group of events that are batch persisted when creating a room. (#15229) --- changelog.d/15229.misc | 1 + synapse/handlers/room.py | 108 +++++++++++++++++++-------------------- 2 files changed, 53 insertions(+), 56 deletions(-) create mode 100644 changelog.d/15229.misc diff --git a/changelog.d/15229.misc b/changelog.d/15229.misc new file mode 100644 index 000000000000..4d8ea03b27c6 --- /dev/null +++ b/changelog.d/15229.misc @@ -0,0 +1 @@ +Add topic and name events to group of events that are batch persisted when creating a room. diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 32451670f326..be120cb12f36 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -569,7 +569,7 @@ async def clone_existing_room( new_room_id, # we expect to override all the presets with initial_state, so this is # somewhat arbitrary. 
- preset_config=RoomCreationPreset.PRIVATE_CHAT, + room_config={"preset": RoomCreationPreset.PRIVATE_CHAT}, invite_list=[], initial_state=initial_state, creation_content=creation_content, @@ -904,13 +904,6 @@ async def create_room( check_membership=False, ) - preset_config = config.get( - "preset", - RoomCreationPreset.PRIVATE_CHAT - if visibility == "private" - else RoomCreationPreset.PUBLIC_CHAT, - ) - raw_initial_state = config.get("initial_state", []) initial_state = OrderedDict() @@ -929,7 +922,7 @@ async def create_room( ) = await self._send_events_for_new_room( requester, room_id, - preset_config=preset_config, + room_config=config, invite_list=invite_list, initial_state=initial_state, creation_content=creation_content, @@ -938,48 +931,6 @@ async def create_room( creator_join_profile=creator_join_profile, ) - if "name" in config: - name = config["name"] - ( - name_event, - last_stream_id, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - requester, - { - "type": EventTypes.Name, - "room_id": room_id, - "sender": user_id, - "state_key": "", - "content": {"name": name}, - }, - ratelimit=False, - prev_event_ids=[last_sent_event_id], - depth=depth, - ) - last_sent_event_id = name_event.event_id - depth += 1 - - if "topic" in config: - topic = config["topic"] - ( - topic_event, - last_stream_id, - ) = await self.event_creation_handler.create_and_send_nonmember_event( - requester, - { - "type": EventTypes.Topic, - "room_id": room_id, - "sender": user_id, - "state_key": "", - "content": {"topic": topic}, - }, - ratelimit=False, - prev_event_ids=[last_sent_event_id], - depth=depth, - ) - last_sent_event_id = topic_event.event_id - depth += 1 - # we avoid dropping the lock between invites, as otherwise joins can # start coming in and making the createRoom slow. # @@ -1047,7 +998,7 @@ async def _send_events_for_new_room( self, creator: Requester, room_id: str, - preset_config: str, + room_config: JsonDict, invite_list: List[str], initial_state: MutableStateMap, creation_content: JsonDict, @@ -1064,11 +1015,33 @@ async def _send_events_for_new_room( Rate limiting should already have been applied by this point. + Args: + creator: + the user requesting the room creation + room_id: + room id for the room being created + room_config: + A dict of configuration options. This will be the body of + a /createRoom request; see + https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3createroom + invite_list: + a list of user ids to invite to the room + initial_state: + A list of state events to set in the new room. + creation_content: + Extra keys, such as m.federate, to be added to the content of the m.room.create event. + room_alias: + alias for the room + power_level_content_override: + The power level content to override in the default power level event. + creator_join_profile: + Set to override the displayname and avatar for the creating + user in this room. + Returns: A tuple containing the stream ID, event ID and depth of the last event sent to the room. """ - creator_id = creator.user.to_string() event_keys = {"room_id": room_id, "sender": creator_id, "state_key": ""} depth = 1 @@ -1079,9 +1052,6 @@ async def _send_events_for_new_room( # created (but not persisted to the db) to determine state for future created events # (as this info can't be pulled from the db) state_map: MutableStateMap[str] = {} - # current_state_group of last event created. 
Used for computing event context of - # events to be batched - current_state_group: Optional[int] = None def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: e = {"type": etype, "content": content} @@ -1135,6 +1105,14 @@ async def create_event( return new_event, new_unpersisted_context + visibility = room_config.get("visibility", "private") + preset_config = room_config.get( + "preset", + RoomCreationPreset.PRIVATE_CHAT + if visibility == "private" + else RoomCreationPreset.PUBLIC_CHAT, + ) + try: config = self._presets_dict[preset_config] except KeyError: @@ -1286,6 +1264,24 @@ async def create_event( ) events_to_send.append((encryption_event, encryption_context)) + if "name" in room_config: + name = room_config["name"] + name_event, name_context = await create_event( + EventTypes.Name, + {"name": name}, + True, + ) + events_to_send.append((name_event, name_context)) + + if "topic" in room_config: + topic = room_config["topic"] + topic_event, topic_context = await create_event( + EventTypes.Topic, + {"topic": topic}, + True, + ) + events_to_send.append((topic_event, topic_context)) + datastore = self.hs.get_datastores().state events_and_context = ( await UnpersistedEventContext.batch_persist_unpersisted_contexts( From e7c3832ba65aa3b82d3738c6f8554e21d9d87d04 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 9 Mar 2023 07:09:49 -0500 Subject: [PATCH 296/344] Pull in netaddr type hints. (#15231) And fix any issues from having those type hints. --- changelog.d/15231.misc | 1 + mypy.ini | 5 ----- poetry.lock | 16 ++++++++++++++-- pyproject.toml | 1 + synapse/http/client.py | 8 +++++--- .../http/federation/matrix_federation_agent.py | 2 +- tests/http/test_client.py | 2 +- 7 files changed, 23 insertions(+), 12 deletions(-) create mode 100644 changelog.d/15231.misc diff --git a/changelog.d/15231.misc b/changelog.d/15231.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15231.misc @@ -0,0 +1 @@ +Improve type hints. 
diff --git a/mypy.ini b/mypy.ini index 572734f8e7cf..cad3716389e1 100644 --- a/mypy.ini +++ b/mypy.ini @@ -74,11 +74,6 @@ ignore_missing_imports = True [mypy-msgpack] ignore_missing_imports = True -# Note: WIP stubs available at -# https://github.com/microsoft/python-type-stubs/tree/64934207f523ad6b611e6cfe039d85d7175d7d0d/netaddr -[mypy-netaddr] -ignore_missing_imports = True - [mypy-parameterized.*] ignore_missing_imports = True diff --git a/poetry.lock b/poetry.lock index 24adc4c8760c..cd89418dd75f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1722,7 +1722,7 @@ files = [ cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] +docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] @@ -2597,6 +2597,18 @@ files = [ {file = "types_jsonschema-4.17.0.5-py3-none-any.whl", hash = "sha256:79ac8a7763fe728947af90a24168b91621edf7e8425bf3670abd4ea0d4758fba"}, ] +[[package]] +name = "types-netaddr" +version = "0.8.0.6" +description = "Typing stubs for netaddr" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "types-netaddr-0.8.0.6.tar.gz", hash = "sha256:e5048640c2412e7ea2d3eb02c94ae1b50442b2c7a50a7c48e957676139cdf19b"}, + {file = "types_netaddr-0.8.0.6-py3-none-any.whl", hash = "sha256:d4d40d1ba35430a4e4c929596542cd37e6831f5d08676b33dc84e06e01a840f6"}, +] + [[package]] name = "types-opentracing" version = "2.4.10.3" @@ -2990,4 +3002,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.7.1" -content-hash = "7bcffef7b6e6d4b1113222e2ca152b3798c997872789c8a1ea01238f199d56fe" +content-hash = "de2c4c8de336593478ce02581a5336afe2544db93ea82f3955b34c3653c29a26" diff --git a/pyproject.toml b/pyproject.toml index 90a118741641..074ac2c11e5a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -321,6 +321,7 @@ mypy-zope = "*" types-bleach = ">=4.1.0" types-commonmark = ">=0.9.2" types-jsonschema = ">=3.2.0" +types-netaddr = ">=0.8.0.6" types-opentracing = ">=2.4.2" types-Pillow = ">=8.3.4" types-psycopg2 = ">=2.9.9" diff --git a/synapse/http/client.py b/synapse/http/client.py index ae48e7c3f004..d777d59ccf5d 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -268,8 +268,8 @@ class BlacklistingAgentWrapper(Agent): def __init__( self, agent: IAgent, + ip_blacklist: IPSet, ip_whitelist: Optional[IPSet] = None, - ip_blacklist: Optional[IPSet] = None, ): """ Args: @@ -291,7 +291,9 @@ def request( h = urllib.parse.urlparse(uri.decode("ascii")) try: - ip_address = IPAddress(h.hostname) + # h.hostname is Optional[str], None raises an AddrFormatError, so + # this is safe even though IPAddress requires a str. + ip_address = IPAddress(h.hostname) # type: ignore[arg-type] except AddrFormatError: # Not an IP pass @@ -388,8 +390,8 @@ def __init__( # by the DNS resolution. 
self.agent = BlacklistingAgentWrapper( self.agent, - ip_whitelist=self._ip_whitelist, ip_blacklist=self._ip_blacklist, + ip_whitelist=self._ip_whitelist, ) async def request( diff --git a/synapse/http/federation/matrix_federation_agent.py b/synapse/http/federation/matrix_federation_agent.py index 0359231e7dd3..8d7d0a387529 100644 --- a/synapse/http/federation/matrix_federation_agent.py +++ b/synapse/http/federation/matrix_federation_agent.py @@ -87,7 +87,7 @@ def __init__( reactor: ISynapseReactor, tls_client_options_factory: Optional[FederationPolicyForHTTPS], user_agent: bytes, - ip_whitelist: IPSet, + ip_whitelist: Optional[IPSet], ip_blacklist: IPSet, _srv_resolver: Optional[SrvResolver] = None, _well_known_resolver: Optional[WellKnownResolver] = None, diff --git a/tests/http/test_client.py b/tests/http/test_client.py index f6d668498589..57b6a84e2319 100644 --- a/tests/http/test_client.py +++ b/tests/http/test_client.py @@ -210,8 +210,8 @@ def test_agent(self) -> None: """Apply the blacklisting agent and ensure it properly blocks connections to particular IPs.""" agent = BlacklistingAgentWrapper( Agent(self.reactor), - ip_whitelist=self.ip_whitelist, ip_blacklist=self.ip_blacklist, + ip_whitelist=self.ip_whitelist, ) # The unsafe IPs should be rejected. From 3d060eae6c836f4153a3150c6970cb9b10516da6 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 9 Mar 2023 07:10:09 -0500 Subject: [PATCH 297/344] Add missing type hints to `synapse.storage.database`. (#15230) --- changelog.d/15230.misc | 1 + mypy.ini | 3 --- synapse/storage/database.py | 21 ++++++++++++++++----- 3 files changed, 17 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15230.misc diff --git a/changelog.d/15230.misc b/changelog.d/15230.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15230.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/mypy.ini b/mypy.ini index cad3716389e1..945f7925cb2c 100644 --- a/mypy.ini +++ b/mypy.ini @@ -48,9 +48,6 @@ warn_unused_ignores = False [mypy-synapse.util.caches.treecache] disallow_untyped_defs = False -[mypy-synapse.storage.database] -disallow_untyped_defs = False - [mypy-tests.util.caches.test_descriptors] disallow_untyped_defs = False diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 5efe31aa19bc..fec4ae5b970d 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -34,6 +34,7 @@ Tuple, Type, TypeVar, + Union, cast, overload, ) @@ -100,6 +101,15 @@ } +class _PoolConnection(Connection): + """ + A Connection from twisted.enterprise.adbapi.Connection. + """ + + def reconnect(self) -> None: + ... + + def make_pool( reactor: IReactorCore, db_config: DatabaseConnectionConfig, @@ -856,7 +866,8 @@ async def _runInteraction() -> R: try: with opentracing.start_active_span(f"db.{desc}"): result = await self.runWithConnection( - self.new_transaction, + # mypy seems to have an issue with this, maybe a bug? + self.new_transaction, # type: ignore[arg-type] desc, after_callbacks, async_after_callbacks, @@ -892,7 +903,7 @@ async def _runInteraction() -> R: async def runWithConnection( self, - func: Callable[..., R], + func: Callable[Concatenate[LoggingDatabaseConnection, P], R], *args: Any, db_autocommit: bool = False, isolation_level: Optional[int] = None, @@ -926,7 +937,7 @@ async def runWithConnection( start_time = monotonic_time() - def inner_func(conn, *args, **kwargs): + def inner_func(conn: _PoolConnection, *args: P.args, **kwargs: P.kwargs) -> R: # We shouldn't be in a transaction. 
If we are then something # somewhere hasn't committed after doing work. (This is likely only # possible during startup, as `run*` will ensure changes are @@ -1019,7 +1030,7 @@ async def execute( decoder: Optional[Callable[[Cursor], R]], query: str, *args: Any, - ) -> R: + ) -> Union[List[Tuple[Any, ...]], R]: """Runs a single query for a result set. Args: @@ -1032,7 +1043,7 @@ async def execute( The result of decoder(results) """ - def interaction(txn): + def interaction(txn: LoggingTransaction) -> Union[List[Tuple[Any, ...]], R]: txn.execute(query, args) if decoder: return decoder(txn) From caf43c3d7c51edad250e50add5595e44720ba32f Mon Sep 17 00:00:00 2001 From: Sean Quah <8349537+squahtx@users.noreply.github.com> Date: Thu, 9 Mar 2023 14:18:39 +0000 Subject: [PATCH 298/344] Faster joins: Fix spurious errors on incremental sync (#15232) When pushing events in partial state rooms down incremental /sync, we try to find the `m.room.member` state event for their senders by digging through their auth events, so that we can present the membership to the client. Events usually have a membership event in their auth events, with the exception of the `m.room.create` event and a user's first join into the room. When implementing #13477, we took the case of a user's first join into account, but forgot to handle the `m.room.create` case. This change fixes that. Signed-off-by: Sean Quah --- changelog.d/15232.bugfix | 1 + synapse/handlers/sync.py | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15232.bugfix diff --git a/changelog.d/15232.bugfix b/changelog.d/15232.bugfix new file mode 100644 index 000000000000..d75a4f2d99d5 --- /dev/null +++ b/changelog.d/15232.bugfix @@ -0,0 +1 @@ +Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index fd6d946c37f0..9f5b83ed5492 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1226,6 +1226,10 @@ async def _find_missing_partial_state_memberships( continue event_with_membership_auth = events_with_membership_auth[member] + is_create = ( + event_with_membership_auth.is_state() + and event_with_membership_auth.type == EventTypes.Create + ) is_join = ( event_with_membership_auth.is_state() and event_with_membership_auth.type == EventTypes.Member @@ -1233,9 +1237,10 @@ async def _find_missing_partial_state_memberships( and event_with_membership_auth.content.get("membership") == Membership.JOIN ) - if not is_join: + if not is_create and not is_join: # The event must include the desired membership as an auth event, unless - # it's the first join event for a given user. + # it's the `m.room.create` event for a room or the first join event for + # a given user. 
missing_members.add(member) auth_event_ids.update(event_with_membership_auth.auth_event_ids()) From ce54477f6fa0264ef00b15bc3e0c2503d85ab061 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Thu, 9 Mar 2023 19:12:09 +0000 Subject: [PATCH 299/344] Give PyCharm some help with `@cache_in_self` (#15238) * Give PyCharm some help with `@cache_in_self` * Changelog * Fix import for old python versions --- changelog.d/15238.misc | 1 + synapse/server.py | 29 ++++++++++++++++++++++++++--- 2 files changed, 27 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15238.misc diff --git a/changelog.d/15238.misc b/changelog.d/15238.misc new file mode 100644 index 000000000000..93ceaeafc9b9 --- /dev/null +++ b/changelog.d/15238.misc @@ -0,0 +1 @@ +Improve type hints. diff --git a/synapse/server.py b/synapse/server.py index df80fc1bebdc..8078463530aa 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -23,6 +23,8 @@ import logging from typing import TYPE_CHECKING, Callable, Dict, List, Optional, TypeVar, cast +from typing_extensions import TypeAlias + from twisted.internet.interfaces import IOpenSSLContextFactory from twisted.internet.tcp import Port from twisted.web.iweb import IPolicyForHTTPS @@ -142,10 +144,31 @@ from synapse.handlers.saml import SamlHandler -T = TypeVar("T") +# The annotation for `cache_in_self` used to be +# def (builder: Callable[["HomeServer"],T]) -> Callable[["HomeServer"],T] +# which mypy was happy with. +# +# But PyCharm was confused by this. If `foo` was decorated by `@cache_in_self`, then +# an expression like `hs.foo()` +# +# - would erroneously warn that we hadn't provided a `hs` argument to foo (PyCharm +# confused about boundmethods and unbound methods?), and +# - would be considered to have type `Any`, making for a poor autocomplete and +# cross-referencing experience. +# +# Instead, use a typevar `F` to express that `@cache_in_self` returns exactly the +# same type it receives. This isn't strictly true [*], but it's more than good +# enough to keep PyCharm and mypy happy. +# +# [*]: (e.g. `builder` could be an object with a __call__ attribute rather than a +# types.FunctionType instance, whereas the return value is always a +# types.FunctionType instance.) + +T: TypeAlias = object +F = TypeVar("F", bound=Callable[["HomeServer"], T]) -def cache_in_self(builder: Callable[["HomeServer"], T]) -> Callable[["HomeServer"], T]: +def cache_in_self(builder: F) -> F: """Wraps a function called e.g. `get_foo`, checking if `self.foo` exists and returning if so. If not, calls the given function and sets `self.foo` to it. 
@@ -183,7 +206,7 @@ def _get(self: "HomeServer") -> T: return dep - return _get + return cast(F, _get) class HomeServer(metaclass=abc.ABCMeta): From e157c63f68ef93c0c25d2df0c63f7da0b30f95ca Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 10 Mar 2023 10:35:18 +0000 Subject: [PATCH 300/344] Fix missing conditional for registering `on_remove_user_third_party_identifier` module api callbacks (#15227 --- changelog.d/15227.bugfix | 1 + synapse/events/third_party_rules.py | 5 +++++ tests/rest/client/test_third_party_rules.py | 19 ++++++++++--------- 3 files changed, 16 insertions(+), 9 deletions(-) create mode 100644 changelog.d/15227.bugfix diff --git a/changelog.d/15227.bugfix b/changelog.d/15227.bugfix new file mode 100644 index 000000000000..eaa26c8f7fa3 --- /dev/null +++ b/changelog.d/15227.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. \ No newline at end of file diff --git a/synapse/events/third_party_rules.py b/synapse/events/third_party_rules.py index 3e4d52c8d803..61d4530be784 100644 --- a/synapse/events/third_party_rules.py +++ b/synapse/events/third_party_rules.py @@ -247,6 +247,11 @@ def register_third_party_rules_callbacks( on_add_user_third_party_identifier ) + if on_remove_user_third_party_identifier is not None: + self._on_remove_user_third_party_identifier_callbacks.append( + on_remove_user_third_party_identifier + ) + async def check_event_allowed( self, event: EventBase, diff --git a/tests/rest/client/test_third_party_rules.py b/tests/rest/client/test_third_party_rules.py index 3b9951370752..753ecc8d161a 100644 --- a/tests/rest/client/test_third_party_rules.py +++ b/tests/rest/client/test_third_party_rules.py @@ -941,18 +941,16 @@ def test_on_add_and_remove_user_third_party_identifier(self) -> None: just before associating and removing a 3PID to/from an account. """ # Pretend to be a Synapse module and register both callbacks as mocks. - third_party_rules = self.hs.get_third_party_event_rules() on_add_user_third_party_identifier_callback_mock = Mock( return_value=make_awaitable(None) ) on_remove_user_third_party_identifier_callback_mock = Mock( return_value=make_awaitable(None) ) - third_party_rules._on_threepid_bind_callbacks.append( - on_add_user_third_party_identifier_callback_mock - ) - third_party_rules._on_threepid_bind_callbacks.append( - on_remove_user_third_party_identifier_callback_mock + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules.register_third_party_rules_callbacks( + on_add_user_third_party_identifier=on_add_user_third_party_identifier_callback_mock, + on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, ) # Register an admin user. @@ -1008,12 +1006,12 @@ def test_on_remove_user_third_party_identifier_is_called_on_deactivate( when a user is deactivated and their third-party ID associations are deleted. """ # Pretend to be a Synapse module and register both callbacks as mocks. 
- third_party_rules = self.hs.get_third_party_event_rules() on_remove_user_third_party_identifier_callback_mock = Mock( return_value=make_awaitable(None) ) - third_party_rules._on_threepid_bind_callbacks.append( - on_remove_user_third_party_identifier_callback_mock + third_party_rules = self.hs.get_third_party_event_rules() + third_party_rules.register_third_party_rules_callbacks( + on_remove_user_third_party_identifier=on_remove_user_third_party_identifier_callback_mock, ) # Register an admin user. @@ -1039,6 +1037,9 @@ def test_on_remove_user_third_party_identifier_is_called_on_deactivate( ) self.assertEqual(channel.code, 200, channel.json_body) + # Check that the mock was not called on the act of adding a third-party ID. + on_remove_user_third_party_identifier_callback_mock.assert_not_called() + # Now deactivate the user. channel = self.make_request( "PUT", From 4bb26c95a931e0be79d6ab9649e4338f7467a987 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Fri, 10 Mar 2023 15:31:25 +0000 Subject: [PATCH 301/344] Refactor `filter_events_for_server` (#15240) * Tweak docstring and type hint * Flip logic and provide better name * Separate decision from action * Track a set of strings, not EventBases * Require explicit boolean options from callers * Add explicit option for partial state rooms * Changelog * Rename param --- changelog.d/15240.misc | 1 + .../sender/per_destination_queue.py | 2 + synapse/handlers/federation.py | 29 ++++++-- synapse/visibility.py | 67 +++++++++++++------ tests/test_visibility.py | 40 +++++++++-- 5 files changed, 109 insertions(+), 30 deletions(-) create mode 100644 changelog.d/15240.misc diff --git a/changelog.d/15240.misc b/changelog.d/15240.misc new file mode 100644 index 000000000000..2b7edf916ec5 --- /dev/null +++ b/changelog.d/15240.misc @@ -0,0 +1 @@ +Refactor `filter_events_for_server`. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index ffc9d95ee703..478187ce449a 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -547,6 +547,8 @@ async def _catch_up_transmission_loop(self) -> None: self._server_name, new_pdus, redact=False, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) # If we've filtered out all the extremities, fall back to diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 5f2057269dac..80156ef343aa 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -392,7 +392,7 @@ async def _maybe_backfill_inner( get_prev_content=False, ) - # We set `check_history_visibility_only` as we might otherwise get false + # We unset `filter_out_erased_senders` as we might otherwise get false # positives from users having been erased. 
filtered_extremities = await filter_events_for_server( self._storage_controllers, @@ -400,7 +400,8 @@ async def _maybe_backfill_inner( self.server_name, events_to_check, redact=False, - check_history_visibility_only=True, + filter_out_erased_senders=False, + filter_out_remote_partial_state_events=False, ) if filtered_extremities: extremities_to_request.append(bp.event_id) @@ -1331,7 +1332,13 @@ async def on_backfill_request( ) events = await filter_events_for_server( - self._storage_controllers, origin, self.server_name, events + self._storage_controllers, + origin, + self.server_name, + events, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) return events @@ -1362,7 +1369,13 @@ async def get_persisted_pdu( await self._event_auth_handler.assert_host_in_room(event.room_id, origin) events = await filter_events_for_server( - self._storage_controllers, origin, self.server_name, [event] + self._storage_controllers, + origin, + self.server_name, + [event], + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) event = events[0] return event @@ -1390,7 +1403,13 @@ async def on_get_missing_events( ) missing_events = await filter_events_for_server( - self._storage_controllers, origin, self.server_name, missing_events + self._storage_controllers, + origin, + self.server_name, + missing_events, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) return missing_events diff --git a/synapse/visibility.py b/synapse/visibility.py index e442de31739e..468e22f8f64e 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -14,7 +14,17 @@ # limitations under the License. import logging from enum import Enum, auto -from typing import Collection, Dict, FrozenSet, List, Optional, Tuple +from typing import ( + Collection, + Dict, + FrozenSet, + List, + Mapping, + Optional, + Sequence, + Set, + Tuple, +) import attr from typing_extensions import Final @@ -565,29 +575,43 @@ async def filter_events_for_server( storage: StorageControllers, target_server_name: str, local_server_name: str, - events: List[EventBase], - redact: bool = True, - check_history_visibility_only: bool = False, + events: Sequence[EventBase], + *, + redact: bool, + filter_out_erased_senders: bool, + filter_out_remote_partial_state_events: bool, ) -> List[EventBase]: - """Filter a list of events based on whether given server is allowed to + """Filter a list of events based on whether the target server is allowed to see them. + For a fully stated room, the target server is allowed to see an event E if: + - the state at E has world readable or shared history vis, OR + - the state at E says that the target server is in the room. + + For a partially stated room, the target server is allowed to see E if: + - E was created by this homeserver, AND: + - the partial state at E has world readable or shared history vis, OR + - the partial state at E says that the target server is in the room. + + TODO: state before or state after? + Args: storage - server_name + target_server_name + local_server_name events - redact: Whether to return a redacted version of the event, or - to filter them out entirely. - check_history_visibility_only: Whether to only check the - history visibility, rather than things like if the sender has been + redact: Controls what to do with events which have been filtered out. + If True, include their redacted forms; if False, omit them entirely. 
+ filter_out_erased_senders: If true, also filter out events whose sender has been erased. This is used e.g. during pagination to decide whether to backfill or not. - + filter_out_remote_partial_state_events: If True, also filter out events in + partial state rooms created by other homeservers. Returns The filtered events. """ - def is_sender_erased(event: EventBase, erased_senders: Dict[str, bool]) -> bool: + def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool: if erased_senders and erased_senders[event.sender]: logger.info("Sender of %s has been erased, redacting", event.event_id) return True @@ -616,7 +640,7 @@ def check_event_is_visible( # server has no users in the room: redact return False - if not check_history_visibility_only: + if filter_out_erased_senders: erased_senders = await storage.main.are_users_erased(e.sender for e in events) else: # We don't want to check whether users are erased, which is equivalent @@ -631,15 +655,15 @@ def check_event_is_visible( # otherwise a room could be fully joined after we retrieve those, which would then bypass # this check but would base the filtering on an outdated view of the membership events. - partial_state_invisible_events = set() - if not check_history_visibility_only: + partial_state_invisible_event_ids: Set[str] = set() + if filter_out_remote_partial_state_events: for e in events: sender_domain = get_domain_from_id(e.sender) if ( sender_domain != local_server_name and await storage.main.is_partial_state_room(e.room_id) ): - partial_state_invisible_events.add(e) + partial_state_invisible_event_ids.add(e.event_id) # Let's check to see if all the events have a history visibility # of "shared" or "world_readable". If that's the case then we don't @@ -658,17 +682,20 @@ def check_event_is_visible( target_server_name, ) - to_return = [] - for e in events: + def include_event_in_output(e: EventBase) -> bool: erased = is_sender_erased(e, erased_senders) visible = check_event_is_visible( event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {}) ) - if e in partial_state_invisible_events: + if e.event_id in partial_state_invisible_event_ids: visible = False - if visible and not erased: + return visible and not erased + + to_return = [] + for e in events: + if include_event_in_output(e): to_return.append(e) elif redact: to_return.append(prune_event(e)) diff --git a/tests/test_visibility.py b/tests/test_visibility.py index 2801a950a85c..9ed330f55497 100644 --- a/tests/test_visibility.py +++ b/tests/test_visibility.py @@ -63,7 +63,13 @@ def test_filtering(self) -> None: filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "test_server", "hs", events_to_filter + self._storage_controllers, + "test_server", + "hs", + events_to_filter, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) @@ -85,7 +91,13 @@ def test_filter_outlier(self) -> None: self.assertEqual( self.get_success( filter_events_for_server( - self._storage_controllers, "remote_hs", "hs", [outlier] + self._storage_controllers, + "remote_hs", + "hs", + [outlier], + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ), [outlier], @@ -96,7 +108,13 @@ def test_filter_outlier(self) -> None: filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "remote_hs", "local_hs", [outlier, evt] + self._storage_controllers, + "remote_hs", + "local_hs", + [outlier, evt], + redact=True, + 
filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}") @@ -108,7 +126,13 @@ def test_filter_outlier(self) -> None: # be redacted) filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "other_server", "local_hs", [outlier, evt] + self._storage_controllers, + "other_server", + "local_hs", + [outlier, evt], + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) self.assertEqual(filtered[0], outlier) @@ -143,7 +167,13 @@ def test_erased_user(self) -> None: # ... and the filtering happens. filtered = self.get_success( filter_events_for_server( - self._storage_controllers, "test_server", "local_hs", events_to_filter + self._storage_controllers, + "test_server", + "local_hs", + events_to_filter, + redact=True, + filter_out_erased_senders=True, + filter_out_remote_partial_state_events=True, ) ) From ff155f7891544d316288d0a89cebbacdbfff9c2c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 10:27:55 +0000 Subject: [PATCH 302/344] Bump gitpython from 3.1.30 to 3.1.31 (#15256) * Bump gitpython from 3.1.30 to 3.1.31 Bumps [gitpython](https://github.com/gitpython-developers/GitPython) from 3.1.30 to 3.1.31. - [Release notes](https://github.com/gitpython-developers/GitPython/releases) - [Changelog](https://github.com/gitpython-developers/GitPython/blob/main/CHANGES) - [Commits](https://github.com/gitpython-developers/GitPython/compare/3.1.30...3.1.31) --- updated-dependencies: - dependency-name: gitpython dependency-type: direct:development update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15256.misc | 1 + poetry.lock | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) create mode 100644 changelog.d/15256.misc diff --git a/changelog.d/15256.misc b/changelog.d/15256.misc new file mode 100644 index 000000000000..ecdc2e5d22ee --- /dev/null +++ b/changelog.d/15256.misc @@ -0,0 +1 @@ +Bump gitpython from 3.1.30 to 3.1.31. 
diff --git a/poetry.lock b/poetry.lock index cd89418dd75f..702d3a0301c0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -497,14 +497,14 @@ smmap = ">=3.0.1,<6" [[package]] name = "gitpython" -version = "3.1.30" -description = "GitPython is a python library used to interact with Git repositories" +version = "3.1.31" +description = "GitPython is a Python library used to interact with Git repositories" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "GitPython-3.1.30-py3-none-any.whl", hash = "sha256:cd455b0000615c60e286208ba540271af9fe531fa6a87cc590a7298785ab2882"}, - {file = "GitPython-3.1.30.tar.gz", hash = "sha256:769c2d83e13f5d938b7688479da374c4e3d49f71549aaf462b646db9602ea6f8"}, + {file = "GitPython-3.1.31-py3-none-any.whl", hash = "sha256:f04893614f6aa713a60cbbe1e6a97403ef633103cdd0ef5eb6efe0deb98dbe8d"}, + {file = "GitPython-3.1.31.tar.gz", hash = "sha256:8ce3bcf69adfdf7c7d503e78fd3b1c492af782d58893b650adb2ac8912ddd573"}, ] [package.dependencies] @@ -1722,7 +1722,7 @@ files = [ cffi = ">=1.4.1" [package.extras] -docs = ["sphinx (>=1.6.5)", "sphinx_rtd_theme"] +docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"] tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] From 6326d744c973d19db8f308394b37064a2cf31b92 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 10:28:05 +0000 Subject: [PATCH 303/344] Bump msgpack from 1.0.4 to 1.0.5 (#15255) * Bump msgpack from 1.0.4 to 1.0.5 Bumps [msgpack](https://github.com/msgpack/msgpack-python) from 1.0.4 to 1.0.5. - [Release notes](https://github.com/msgpack/msgpack-python/releases) - [Changelog](https://github.com/msgpack/msgpack-python/blob/main/ChangeLog.rst) - [Commits](https://github.com/msgpack/msgpack-python/compare/v1.0.4...v1.0.5) --- updated-dependencies: - dependency-name: msgpack dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15255.misc | 1 + poetry.lock | 117 ++++++++++++++++++++++------------------- 2 files changed, 65 insertions(+), 53 deletions(-) create mode 100644 changelog.d/15255.misc diff --git a/changelog.d/15255.misc b/changelog.d/15255.misc new file mode 100644 index 000000000000..8be3b828e96b --- /dev/null +++ b/changelog.d/15255.misc @@ -0,0 +1 @@ +Bump msgpack from 1.0.4 to 1.0.5. 
diff --git a/poetry.lock b/poetry.lock index 702d3a0301c0..be1424b1f99b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1098,64 +1098,75 @@ dev = ["black (==22.3.0)", "flake8 (==4.0.1)", "isort (==5.9.3)", "ldaptor", "ma [[package]] name = "msgpack" -version = "1.0.4" +version = "1.0.5" description = "MessagePack serializer" category = "main" optional = false python-versions = "*" files = [ - {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"}, - {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"}, - {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"}, - {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"}, - {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"}, - {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"}, - {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"}, - {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"}, - {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"}, - {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"}, - {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"}, - {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"}, - {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = 
"sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"}, - {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"}, - {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"}, - {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"}, - {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"}, - {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"}, - {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"}, - {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"}, - {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"}, - {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"}, - {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"}, - {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"}, - {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"}, - {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"}, - {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"}, - {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"}, + {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:525228efd79bb831cf6830a732e2e80bc1b05436b086d4264814b4b2955b2fa9"}, + {file = "msgpack-1.0.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4f8d8b3bf1ff2672567d6b5c725a1b347fe838b912772aa8ae2bf70338d5a198"}, + {file = "msgpack-1.0.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cdc793c50be3f01106245a61b739328f7dccc2c648b501e237f0699fe1395b81"}, + {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cb47c21a8a65b165ce29f2bec852790cbc04936f502966768e4aae9fa763cb7"}, + {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e42b9594cc3bf4d838d67d6ed62b9e59e201862a25e9a157019e171fbe672dd3"}, + {file = "msgpack-1.0.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:55b56a24893105dc52c1253649b60f475f36b3aa0fc66115bffafb624d7cb30b"}, + {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:1967f6129fc50a43bfe0951c35acbb729be89a55d849fab7686004da85103f1c"}, + {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20a97bf595a232c3ee6d57ddaadd5453d174a52594bf9c21d10407e2a2d9b3bd"}, + {file = "msgpack-1.0.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d25dd59bbbbb996eacf7be6b4ad082ed7eacc4e8f3d2df1ba43822da9bfa122a"}, + {file = "msgpack-1.0.5-cp310-cp310-win32.whl", hash = "sha256:382b2c77589331f2cb80b67cc058c00f225e19827dbc818d700f61513ab47bea"}, + {file = "msgpack-1.0.5-cp310-cp310-win_amd64.whl", hash = "sha256:4867aa2df9e2a5fa5f76d7d5565d25ec76e84c106b55509e78c1ede0f152659a"}, + {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9f5ae84c5c8a857ec44dc180a8b0cc08238e021f57abdf51a8182e915e6299f0"}, + {file = "msgpack-1.0.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9e6ca5d5699bcd89ae605c150aee83b5321f2115695e741b99618f4856c50898"}, + {file = "msgpack-1.0.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5494ea30d517a3576749cad32fa27f7585c65f5f38309c88c6d137877fa28a5a"}, + {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ab2f3331cb1b54165976a9d976cb251a83183631c88076613c6c780f0d6e45a"}, + 
{file = "msgpack-1.0.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28592e20bbb1620848256ebc105fc420436af59515793ed27d5c77a217477705"}, + {file = "msgpack-1.0.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe5c63197c55bce6385d9aee16c4d0641684628f63ace85f73571e65ad1c1e8d"}, + {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ed40e926fa2f297e8a653c954b732f125ef97bdd4c889f243182299de27e2aa9"}, + {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b2de4c1c0538dcb7010902a2b97f4e00fc4ddf2c8cda9749af0e594d3b7fa3d7"}, + {file = "msgpack-1.0.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:bf22a83f973b50f9d38e55c6aade04c41ddda19b00c4ebc558930d78eecc64ed"}, + {file = "msgpack-1.0.5-cp311-cp311-win32.whl", hash = "sha256:c396e2cc213d12ce017b686e0f53497f94f8ba2b24799c25d913d46c08ec422c"}, + {file = "msgpack-1.0.5-cp311-cp311-win_amd64.whl", hash = "sha256:6c4c68d87497f66f96d50142a2b73b97972130d93677ce930718f68828b382e2"}, + {file = "msgpack-1.0.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:a2b031c2e9b9af485d5e3c4520f4220d74f4d222a5b8dc8c1a3ab9448ca79c57"}, + {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f837b93669ce4336e24d08286c38761132bc7ab29782727f8557e1eb21b2080"}, + {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1d46dfe3832660f53b13b925d4e0fa1432b00f5f7210eb3ad3bb9a13c6204a6"}, + {file = "msgpack-1.0.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:366c9a7b9057e1547f4ad51d8facad8b406bab69c7d72c0eb6f529cf76d4b85f"}, + {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:4c075728a1095efd0634a7dccb06204919a2f67d1893b6aa8e00497258bf926c"}, + {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:f933bbda5a3ee63b8834179096923b094b76f0c7a73c1cfe8f07ad608c58844b"}, + {file = "msgpack-1.0.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:36961b0568c36027c76e2ae3ca1132e35123dcec0706c4b7992683cc26c1320c"}, + {file = "msgpack-1.0.5-cp36-cp36m-win32.whl", hash = "sha256:b5ef2f015b95f912c2fcab19c36814963b5463f1fb9049846994b007962743e9"}, + {file = "msgpack-1.0.5-cp36-cp36m-win_amd64.whl", hash = "sha256:288e32b47e67f7b171f86b030e527e302c91bd3f40fd9033483f2cacc37f327a"}, + {file = "msgpack-1.0.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:137850656634abddfb88236008339fdaba3178f4751b28f270d2ebe77a563b6c"}, + {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c05a4a96585525916b109bb85f8cb6511db1c6f5b9d9cbcbc940dc6b4be944b"}, + {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56a62ec00b636583e5cb6ad313bbed36bb7ead5fa3a3e38938503142c72cba4f"}, + {file = "msgpack-1.0.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef8108f8dedf204bb7b42994abf93882da1159728a2d4c5e82012edd92c9da9f"}, + {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1835c84d65f46900920b3708f5ba829fb19b1096c1800ad60bae8418652a951d"}, + {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:e57916ef1bd0fee4f21c4600e9d1da352d8816b52a599c46460e93a6e9f17086"}, + {file = "msgpack-1.0.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = 
"sha256:17358523b85973e5f242ad74aa4712b7ee560715562554aa2134d96e7aa4cbbf"}, + {file = "msgpack-1.0.5-cp37-cp37m-win32.whl", hash = "sha256:cb5aaa8c17760909ec6cb15e744c3ebc2ca8918e727216e79607b7bbce9c8f77"}, + {file = "msgpack-1.0.5-cp37-cp37m-win_amd64.whl", hash = "sha256:ab31e908d8424d55601ad7075e471b7d0140d4d3dd3272daf39c5c19d936bd82"}, + {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b72d0698f86e8d9ddf9442bdedec15b71df3598199ba33322d9711a19f08145c"}, + {file = "msgpack-1.0.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:379026812e49258016dd84ad79ac8446922234d498058ae1d415f04b522d5b2d"}, + {file = "msgpack-1.0.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:332360ff25469c346a1c5e47cbe2a725517919892eda5cfaffe6046656f0b7bb"}, + {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:476a8fe8fae289fdf273d6d2a6cb6e35b5a58541693e8f9f019bfe990a51e4ba"}, + {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9985b214f33311df47e274eb788a5893a761d025e2b92c723ba4c63936b69b1"}, + {file = "msgpack-1.0.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48296af57cdb1d885843afd73c4656be5c76c0c6328db3440c9601a98f303d87"}, + {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:addab7e2e1fcc04bd08e4eb631c2a90960c340e40dfc4a5e24d2ff0d5a3b3edb"}, + {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:916723458c25dfb77ff07f4c66aed34e47503b2eb3188b3adbec8d8aa6e00f48"}, + {file = "msgpack-1.0.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:821c7e677cc6acf0fd3f7ac664c98803827ae6de594a9f99563e48c5a2f27eb0"}, + {file = "msgpack-1.0.5-cp38-cp38-win32.whl", hash = "sha256:1c0f7c47f0087ffda62961d425e4407961a7ffd2aa004c81b9c07d9269512f6e"}, + {file = "msgpack-1.0.5-cp38-cp38-win_amd64.whl", hash = "sha256:bae7de2026cbfe3782c8b78b0db9cbfc5455e079f1937cb0ab8d133496ac55e1"}, + {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:20c784e66b613c7f16f632e7b5e8a1651aa5702463d61394671ba07b2fc9e025"}, + {file = "msgpack-1.0.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:266fa4202c0eb94d26822d9bfd7af25d1e2c088927fe8de9033d929dd5ba24c5"}, + {file = "msgpack-1.0.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:18334484eafc2b1aa47a6d42427da7fa8f2ab3d60b674120bce7a895a0a85bdd"}, + {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57e1f3528bd95cc44684beda696f74d3aaa8a5e58c816214b9046512240ef437"}, + {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:586d0d636f9a628ddc6a17bfd45aa5b5efaf1606d2b60fa5d87b8986326e933f"}, + {file = "msgpack-1.0.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a740fa0e4087a734455f0fc3abf5e746004c9da72fbd541e9b113013c8dc3282"}, + {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3055b0455e45810820db1f29d900bf39466df96ddca11dfa6d074fa47054376d"}, + {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a61215eac016f391129a013c9e46f3ab308db5f5ec9f25811e811f96962599a8"}, + {file = "msgpack-1.0.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:362d9655cd369b08fda06b6657a303eb7172d5279997abe094512e919cf74b11"}, + {file = "msgpack-1.0.5-cp39-cp39-win32.whl", hash = "sha256:ac9dd47af78cae935901a9a500104e2dea2e253207c924cc95de149606dc43cc"}, + {file = 
"msgpack-1.0.5-cp39-cp39-win_amd64.whl", hash = "sha256:06f5174b5f8ed0ed919da0e62cbd4ffde676a374aba4020034da05fab67b9164"}, + {file = "msgpack-1.0.5.tar.gz", hash = "sha256:c075544284eadc5cddc70f4757331d99dcbc16b2bbd4849d15f8aae4cf36d31c"}, ] [[package]] From f167b35de9802ced5fe37a98200f14a17befa2ec Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 10:28:15 +0000 Subject: [PATCH 304/344] Bump pysaml2 from 7.2.1 to 7.3.1 (#15254) * Bump pysaml2 from 7.2.1 to 7.3.1 Bumps [pysaml2](https://github.com/IdentityPython/pysaml2) from 7.2.1 to 7.3.1. - [Release notes](https://github.com/IdentityPython/pysaml2/releases) - [Changelog](https://github.com/IdentityPython/pysaml2/blob/v7.3.1/CHANGELOG.md) - [Commits](https://github.com/IdentityPython/pysaml2/compare/v7.2.1...v7.3.1) --- updated-dependencies: - dependency-name: pysaml2 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15254.misc | 1 + poetry.lock | 15 +++++++-------- 2 files changed, 8 insertions(+), 8 deletions(-) create mode 100644 changelog.d/15254.misc diff --git a/changelog.d/15254.misc b/changelog.d/15254.misc new file mode 100644 index 000000000000..83287914c317 --- /dev/null +++ b/changelog.d/15254.misc @@ -0,0 +1 @@ +Bump pysaml2 from 7.2.1 to 7.3.1. diff --git a/poetry.lock b/poetry.lock index be1424b1f99b..b96e86d8aaaf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1788,26 +1788,25 @@ files = [ [[package]] name = "pysaml2" -version = "7.2.1" +version = "7.3.1" description = "Python implementation of SAML Version 2 Standard" category = "main" optional = true -python-versions = "<4,>=3.6" +python-versions = ">=3.6.2,<4.0.0" files = [ - {file = "pysaml2-7.2.1-py2.py3-none-any.whl", hash = "sha256:2ca155f4eeb1471b247a7b0cc79ccfd5780046d33d0b201e1199a00698dce795"}, - {file = "pysaml2-7.2.1.tar.gz", hash = "sha256:f40f9576dce9afef156469179277ffeeca36829248be333252af0517a26d0b1f"}, + {file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"}, + {file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"}, ] [package.dependencies] cryptography = ">=3.1" defusedxml = "*" +importlib-metadata = {version = ">=1.7.0", markers = "python_version < \"3.8\""} importlib-resources = {version = "*", markers = "python_version < \"3.9\""} -pyOpenSSL = "*" +pyopenssl = "*" python-dateutil = "*" pytz = "*" -requests = ">=1.0.0" -setuptools = "*" -six = "*" +requests = ">=2,<3" xmlschema = ">=1.2.1" [package.extras] From 023f215c68daad0ee21082df143798f98d778bcd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 10:28:24 +0000 Subject: [PATCH 305/344] Bump serde from 1.0.152 to 1.0.155 (#15253) * Bump serde from 1.0.152 to 1.0.155 Bumps [serde](https://github.com/serde-rs/serde) from 1.0.152 to 1.0.155. - [Release notes](https://github.com/serde-rs/serde/releases) - [Commits](https://github.com/serde-rs/serde/compare/v1.0.152...v1.0.155) --- updated-dependencies: - dependency-name: serde dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- Cargo.lock | 8 ++++---- changelog.d/15253.misc | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15253.misc diff --git a/Cargo.lock b/Cargo.lock index f858b2107f6f..025d22b9d429 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -323,18 +323,18 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.152" +version = "1.0.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" +checksum = "71f2b4817415c6d4210bfe1c7bfcf4801b2d904cb4d0e1a8fdb651013c9e86b8" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.152" +version = "1.0.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" +checksum = "d071a94a3fac4aff69d023a7f411e33f40f3483f8c5190b1953822b6b76d7630" dependencies = [ "proc-macro2", "quote", diff --git a/changelog.d/15253.misc b/changelog.d/15253.misc new file mode 100644 index 000000000000..4aa75294c728 --- /dev/null +++ b/changelog.d/15253.misc @@ -0,0 +1 @@ +Bump serde from 1.0.152 to 1.0.155. From 408f60540f49a4feea1c2422856659e72f4f5e4a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 10:28:32 +0000 Subject: [PATCH 306/344] Bump hiredis from 2.2.1 to 2.2.2 (#15252) * Bump hiredis from 2.2.1 to 2.2.2 Bumps [hiredis](https://github.com/redis/hiredis-py) from 2.2.1 to 2.2.2. - [Release notes](https://github.com/redis/hiredis-py/releases) - [Changelog](https://github.com/redis/hiredis-py/blob/master/CHANGELOG.md) - [Commits](https://github.com/redis/hiredis-py/compare/v2.2.1...v2.2.2) --- updated-dependencies: - dependency-name: hiredis dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15252.misc | 1 + poetry.lock | 180 ++++++++++++++++++++--------------------- 2 files changed, 91 insertions(+), 90 deletions(-) create mode 100644 changelog.d/15252.misc diff --git a/changelog.d/15252.misc b/changelog.d/15252.misc new file mode 100644 index 000000000000..cf18d9e84982 --- /dev/null +++ b/changelog.d/15252.misc @@ -0,0 +1 @@ +Bump hiredis from 2.2.1 to 2.2.2. 
diff --git a/poetry.lock b/poetry.lock index b96e86d8aaaf..7a45975f32b0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -513,101 +513,101 @@ typing-extensions = {version = ">=3.7.4.3", markers = "python_version < \"3.8\"" [[package]] name = "hiredis" -version = "2.2.1" +version = "2.2.2" description = "Python wrapper for hiredis" category = "main" optional = true python-versions = ">=3.7" files = [ - {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:998ab35070dc81806a23be5de837466a51b25e739fb1a0d5313474d5bb29c829"}, - {file = "hiredis-2.2.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:70db8f514ebcb6f884497c4eee21d0350bbc4102e63502411f8e100cf3b7921e"}, - {file = "hiredis-2.2.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a57a4a33a78e94618d026fc68e853d3f71fa4a1d4da7a6e828e927819b001f2d"}, - {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:209b94fa473b39e174b665186cad73206ca849cf6e822900b761e83080f67b06"}, - {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:58e51d83b42fdcc29780897641b1dcb30c0e4d3c4f6d9d71d79b2cfec99b8eb7"}, - {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:706995fb1173fab7f12110fbad00bb95dd0453336f7f0b341b4ca7b1b9ff0bc7"}, - {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:812e27a9b20db967f942306267bcd8b1369d7c171831b6f45d22d75576cd01cd"}, - {file = "hiredis-2.2.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:69c32d54ac1f6708145c77d79af12f7448ca1025a0bf912700ad1f0be511026a"}, - {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:96745c4cdca261a50bd70c01f14c6c352a48c4d6a78e2d422040fba7919eadef"}, - {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:943631a49d7746cd413acaf0b712d030a15f02671af94c54759ba3144351f97a"}, - {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:796b616478a5c1cac83e9e10fcd803e746e5a02461bfa7767aebae8b304e2124"}, - {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:341952a311654c39433c1e0d8d31c2a0c5864b2675ed159ed264ecaa5cfb225b"}, - {file = "hiredis-2.2.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6fbb1a56d455602bd6c276d5c316ae245111b2dc8158355112f2d905e7471c85"}, - {file = "hiredis-2.2.1-cp310-cp310-win32.whl", hash = "sha256:14f67987e1d55b197e46729d1497019228ad8c94427bb63500e6f217aa586ca5"}, - {file = "hiredis-2.2.1-cp310-cp310-win_amd64.whl", hash = "sha256:ea011b3bfa37f2746737860c1e5ba198b63c9b4764e40b042aac7bd2c258938f"}, - {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:103bde304d558061c4ba1d7ff94351e761da753c28883fd68964f25080152dac"}, - {file = "hiredis-2.2.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6ba9f425739a55e1409fda5dafad7fdda91c6dcd2b111ba93bb7b53d90737506"}, - {file = "hiredis-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cb59a7535e0b8373f694ce87576c573f533438c5fbee450193333a22118f4a98"}, - {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6afbddc82bbb2c4c405d9a49a056ffe6541f8ad3160df49a80573b399f94ba3a"}, - {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a386f00800b1b043b091b93850e02814a8b398952438a9d4895bd70f5c80a821"}, - {file = 
"hiredis-2.2.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fec7465caac7b0a36551abb37066221cabf59f776d78fdd58ff17669942b4b41"}, - {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd590dd7858d0107c37b438aa27bbcaa0ba77c5b8eda6ebab7acff0aa89f7d7"}, - {file = "hiredis-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1523ec56d711bee863aaaf4325cef4430da3143ec388e60465f47e28818016cd"}, - {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d4f6bbe599d255a504ef789c19e23118c654d256343c1ecdf7042fb4b4d0f7fa"}, - {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d77dbc13d55c1d45d6a203da910002fffd13fa310af5e9c5994959587a192789"}, - {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b2b847ea3f9af99e02c4c58b7cc6714e105c8d73705e5ff1132e9a249391f688"}, - {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:18135ecf28fc6577e71c0f8d8eb2f31e4783020a7d455571e7e5d2793374ce20"}, - {file = "hiredis-2.2.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:724aed63871bc386d6f28b5f4d15490d84934709f093e021c4abb785e72db5db"}, - {file = "hiredis-2.2.1-cp311-cp311-win32.whl", hash = "sha256:497a8837984ddfbf6f5a4c034c0107f2c5aaaebeebf34e2c6ab591acffce5f12"}, - {file = "hiredis-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:1776db8af168b22588ec10c3df674897b20cc6d25f093cd2724b8b26d7dac057"}, - {file = "hiredis-2.2.1-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:49a518b456403602775218062a4dd06bed42b26854ff1ff6784cfee2ef6fa347"}, - {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:02118dc8545e2371448b9983a0041f12124eea907eb61858f2be8e7c1dfa1e43"}, - {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78f2a53149b116e0088f6eda720574f723fbc75189195aab8a7a2a591ca89cab"}, - {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e3b8f0eba6d88c2aec63e6d1e38960f8a25c01f9796d32993ffa1cfcf48744c"}, - {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38270042f40ed9e576966c603d06c984c80364b0d9ec86962a31551dae27b0cd"}, - {file = "hiredis-2.2.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a11250dd0521e9f729325b19ce9121df4cbb80ad3468cc21e56803e8380bc4b"}, - {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:595474e6c25f1c3c8ec67d587188e7dd47c492829b2c7c5ba1b17ee9e7e9a9ea"}, - {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:8ad00a7621de8ef9ae1616cf24a53d48ad1a699b96668637559a8982d109a800"}, - {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:a5e5e51faa7cd02444d4ee1eb59e316c08e974bcfa3a959cb790bc4e9bb616c5"}, - {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:0a9493bbc477436a3725e99cfcba768f416ab70ab92956e373d1a3b480b1e204"}, - {file = "hiredis-2.2.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:231e5836579fc75b25c6f9bb6213950ea3d39aadcfeb7f880211ca55df968342"}, - {file = "hiredis-2.2.1-cp37-cp37m-win32.whl", hash = "sha256:2ed6c948648798b440a9da74db65cdd2ad22f38cf4687f5212df369031394591"}, - {file = "hiredis-2.2.1-cp37-cp37m-win_amd64.whl", hash = "sha256:c65f38418e35970d44f9b5a59533f0f60f14b9f91b712dba51092d2c74d4dcd1"}, - {file = 
"hiredis-2.2.1-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:2f6e80fb7cd4cc61af95ab2875801e4c36941a956c183297c3273cbfbbefa9d3"}, - {file = "hiredis-2.2.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:a54d2b3328a2305e0dfb257a4545053fdc64df0c64e0635982e191c846cc0456"}, - {file = "hiredis-2.2.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:33624903dfb629d6f7c17ed353b4b415211c29fd447f31e6bf03361865b97e68"}, - {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f4b92df1e69dc48411045d2117d1d27ec6b5f0dd2b6501759cea2f6c68d5618"}, - {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03c6a1f6bf2f64f40d076c997cdfcb8b3d1c9557dda6cb7bbad2c5c839921726"}, - {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3af3071d33432960cba88ce4e4932b508ab3e13ce41431c2a1b2dc9a988f7627"}, - {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb3f56d371b560bf39fe45d29c24e3d819ae2399733e2c86394a34e76adab38"}, - {file = "hiredis-2.2.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5da26970c41084a2ac337a4f075301a78cffb0e0f3df5e98c3049fc95e10725c"}, - {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d87f90064106dfd7d2cc7baeb007a8ca289ee985f4bf64bb627c50cdc34208ed"}, - {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c233199b9f4dd43e2297577e32ba5fcd0378871a47207bc424d5e5344d030a3e"}, - {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:99b5bcadd5e029234f89d244b86bc8d21093be7ac26111068bebd92a4a95dc73"}, - {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:ed79f65098c4643cb6ec4530b337535f00b58ea02e25180e3df15e9cc9da58dc"}, - {file = "hiredis-2.2.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c7fd6394779c9a3b324b65394deadb949311662f3770bd34f904b8c04328082c"}, - {file = "hiredis-2.2.1-cp38-cp38-win32.whl", hash = "sha256:bde0178e7e6c49e408b8d3a8c0ec8e69a23e8dc2ae29f87af2d74b21025385dc"}, - {file = "hiredis-2.2.1-cp38-cp38-win_amd64.whl", hash = "sha256:6f5f469ba5ae613e4c652cdedfc723aa802329fcc2d65df1e9ab0ac0de34ad9e"}, - {file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:e5945ef29a76ab792973bef1ffa2970d81dd22edb94dfa5d6cba48beb9f51962"}, - {file = "hiredis-2.2.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:bad6e9a0e31678ee15ac3ef72e77c08177c86df05c37d2423ff3cded95131e51"}, - {file = "hiredis-2.2.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e57dfcd72f036cce9eab77bc533a932444459f7e54d96a555d25acf2501048be"}, - {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3afc76a012b907895e679d1e6bcc6394845d0cc91b75264711f8caf53d7b0f37"}, - {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a99c0d50d1a31be285c83301eff4b911dca16aac1c3fe1875c7d6f517a1e9fc4"}, - {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d8849bc74473778c10377f82cf9a534e240734e2f9a92c181ef6d51b4e3d3eb2"}, - {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e199868fe78c2d175bbb7b88f5daf2eae4a643a62f03f8d6736f9832f04f88b"}, - {file = "hiredis-2.2.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:a0e98106a28fabb672bb014f6c4506cc67491e4cf9ac56d189cbb1e81a9a3e68"}, - {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0f2607e08dcb1c5d1e925c451facbfc357927acaa336a004552c32a6dd68e050"}, - {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:954abb363ed1d18dfb7510dbd89402cb7c21106307e04e2ee7bccf35a134f4dd"}, - {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:0474ab858f5dd15be6b467d89ec14b4c287f53b55ca5455369c3a1a787ef3a24"}, - {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:b90dd0adb1d659f8c94b32556198af1e61e38edd27fc7434d4b3b68ad4e51d37"}, - {file = "hiredis-2.2.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7a5dac3ae05bc64b233f950edf37dce9c904aedbc7e18cfc2adfb98edb85da46"}, - {file = "hiredis-2.2.1-cp39-cp39-win32.whl", hash = "sha256:19666eb154b7155d043bf941e50d1640125f92d3294e2746df87639cc44a10e6"}, - {file = "hiredis-2.2.1-cp39-cp39-win_amd64.whl", hash = "sha256:c702dd28d52656bb86f7a2a76ea9341ac434810871b51fcd6cd28c6d7490fbdf"}, - {file = "hiredis-2.2.1-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c604919bba041e4c4708ecb0fe6c7c8a92a7f1e886b0ae8d2c13c3e4abfc5eda"}, - {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04c972593f26f4769e2be7058b7928179337593bcfc6a8b6bda87eea807b7cbf"}, - {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42504e4058246536a9f477f450ab21275126fc5f094be5d5e5290c6de9d855f9"}, - {file = "hiredis-2.2.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:220b6ac9d3fce60d14ccc34f9790e20a50dc56b6fb747fc357600963c0cf6aca"}, - {file = "hiredis-2.2.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:a16d81115128e6a9fc6904de051475be195f6c460c9515583dccfd407b16ff78"}, - {file = "hiredis-2.2.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:df6325aade17b1f86c8b87f6a1d9549a4184fda00e27e2fca0e5d2a987130365"}, - {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcad9c9239845b29f149a895e7e99b8307889cecbfc37b69924c2dad1f4ae4e8"}, - {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ccf6fc116795d76bca72aa301a33874c507f9e77402e857d298c73419b5ea3"}, - {file = "hiredis-2.2.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:63f941e77c024be2a1451089e2fdbd5ff450ff0965f49948bbeb383aef1799ea"}, - {file = "hiredis-2.2.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:2bb682785a37145b209f44f5d5290b0f9f4b56205542fc592d0f1b3d5ffdfcf0"}, - {file = "hiredis-2.2.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:8fe289556264cb1a2efbcd3d6b3c55e059394ad01b6afa88151264137f85c352"}, - {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96b079c53b6acd355edb6fe615270613f3f7ddc4159d69837ce15ec518925c40"}, - {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82ad46d1140c5779cd9dfdafc35f47dd09dadff7654d8001c50bb283da82e7c9"}, - {file = "hiredis-2.2.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:17e9f363db56a8edb4eff936354cfa273197465bcd970922f3d292032eca87b0"}, - {file = "hiredis-2.2.1-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:ae6b356ed166a0ec663a46b547c988815d2b0e5f2d0af31ef34a16cf3ce705d0"}, - {file = "hiredis-2.2.1.tar.gz", hash = "sha256:d9fbef7f9070055a7cc012ac965e3dbabbf2400b395649ea8d6016dc82a7d13a"}, + {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:ba6123ff137275e2f4c31fc74b93813fcbb79160d43f5357163e09638c7743de"}, + {file = "hiredis-2.2.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d995846acc8e3339fb7833cd19bf6f3946ff5157c8488a4df9c51cd119a36870"}, + {file = "hiredis-2.2.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:82f869ca44bcafa37cd71cfa1429648fa354d6021dcd72f03a2f66bcb339c546"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa90a5ee7a7f30c3d72d3513914b8f51f953a71b8cbd52a241b6db6685e55645"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01e2e588392b5fdcc3a6aa0eb62a2eb2a142f829082fa4c3354228029d3aa1ce"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5dac177a6ab8b4eb4d5e74978c29eef7cc9eef14086f814cb3893f7465578044"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cb992e3f9753c5a0c637f333c2010d1ad702aebf2d730ee4d484f32b19bae97"}, + {file = "hiredis-2.2.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e61c22fda5fc25d31bbced24a8322d33c5cb8cad9ba698634c16edb5b3e79a91"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9873898e26e50cd41415e9d1ea128bfdb60eb26abb4f5be28a4500fd7834dc0c"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2c18b00a382546e19bcda8b83dcca5b6e0dbc238d235723434405f48a18e8f77"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:8c3a6998f6f88d7ca4d082fd26525074df13162b274d7c64034784b6fdc56666"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0fc1f9a9791d028b2b8afa318ccff734c7fc8861d37a04ca9b3d27c9b05f9718"}, + {file = "hiredis-2.2.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f2cfd323f83985f2bed6ed013107873275025af270485b7d04c338bfb47bd14"}, + {file = "hiredis-2.2.2-cp310-cp310-win32.whl", hash = "sha256:55c7e9a9e05f8c0555bfba5c16d98492f8b6db650e56d0c35cc28aeabfc86020"}, + {file = "hiredis-2.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:eaff526c2fed31c971b0fa338a25237ae5513550ef75d0b85b9420ec778cca45"}, + {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:688b9b7458b4f3f452fea6ed062c04fa1fd9a69d9223d95c6cb052581aba553b"}, + {file = "hiredis-2.2.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:544d52fde3a8dac7854673eac20deca05214758193c01926ffbb0d57c6bf4ffe"}, + {file = "hiredis-2.2.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:990916e8b0b4eedddef787e73549b562f8c9e73a7fea82f9b8ff517806774ad0"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10dc34854e9acfb3e7cc4157606e2efcb497b1c6fca07bd6c3be34ae5e413f13"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c446a2007985ae49c2ecd946dd819dea72b931beb5f647ba08655a1a1e133fa8"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:02b9f928dc6cd43ed0f0ffc1c75fb209fb180f004b7e2e19994805f998d247aa"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:2a355aff8dfa02ebfe67f0946dd706e490bddda9ea260afac9cdc43942310c53"}, + {file = "hiredis-2.2.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:831461abe5b63e73719621a5f31d8fc175528a05dc09d5a8aa8ef565d6deefa4"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:75349f7c8f77eb0fd33ede4575d1e5b0a902a8176a436bf03293d7fec4bd3894"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1eb39b34d15220095dc49ad1e1082580d35cd3b6d9741def52988b5075e4ff03"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:a9b306f4e870747eea8b008dcba2e9f1e4acd12b333a684bc1cc120e633a280e"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:03dfb4ab7a2136ce1be305592553f102e1bd91a96068ab2778e3252aed20d9bc"}, + {file = "hiredis-2.2.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d8bc89c7e33fecb083a199ade0131a34d20365a8c32239e218da57290987ca9a"}, + {file = "hiredis-2.2.2-cp311-cp311-win32.whl", hash = "sha256:ed44b3c711cecde920f238ac35f70ac08744f2079b6369655856e43944464a72"}, + {file = "hiredis-2.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:2e2f0ce3e8ab1314a52f562386220f6714fd24d7968a95528135ad04e88cc741"}, + {file = "hiredis-2.2.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:e7e61ab75b851aac2d6bc634d03738a242a6ef255a44178437b427c5ebac0a87"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9eb14339e399554bb436cc4628e8aaa3943adf7afcf34aba4cbd1e3e6b9ec7ec"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4ec57886f20f4298537cb1ab9dbda98594fb8d7c724c5fbf9a4b55329fd4a63"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a89f5afb9827eab07b9c8c585cd4dc95e5232c727508ae2c935d09531abe9e33"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3645590b9234cafd21c8ecfbf252ad9aa1d67629f4bdc98ba3627f48f8f7b5aa"}, + {file = "hiredis-2.2.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99350e89f52186146938bdba0b9c6cd68802c20346707d6ca8366f2d69d89b2f"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b5d290f3d8f7a05c4adbe6c355055b87c7081bfa1eccd1ae5491216307ee5f53"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c95be6f20377d5995ef41a98314542e194d2dc9c2579d8f130a1aea78d48fd42"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e4e2da61a04251121cb551f569c3250e6e27e95f2a80f8351c36822eda1f5d2b"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:ac7f8d68826f95a3652e44b0c12bfa74d3aa6531d47d5dbe6a2fbfc7979bc20f"}, + {file = "hiredis-2.2.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:359e662324318baadb768d3c4ade8c4bdcfbb313570eb01e15d75dc5db781815"}, + {file = "hiredis-2.2.2-cp37-cp37m-win32.whl", hash = "sha256:fd0ca35e2cf44866137cbb5ae7e439fab18a0b0e0e1cf51d45137622d59ec012"}, + {file = "hiredis-2.2.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c9488ffb10acc6b121c498875278b0a6715d193742dc92d21a281712169ac06d"}, + {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:1570fe4f93bc1ea487fb566f2b863fd0ed146f643a4ea31e4e07036db9e0c7f8"}, + {file = "hiredis-2.2.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = 
"sha256:8753c561b37cccbda7264c9b4486e206a6318c18377cd647beb3aa41a15a6beb"}, + {file = "hiredis-2.2.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a06d0dd84f10be6b15a92edbca2490b64917280f66d8267c63de99b6550308ad"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40ff3f1ec3a4046732e9e41df08dcb1a559847196755d295d43e32528aae39e6"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c24d856e13c02bd9d28a189e47be70cbba6f2c2a4bd85a8cc98819db9e7e3e06"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ee9fe7cef505e8d925c70bebcc16bfab12aa7af922f948346baffd4730f7b00"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03ab1d545794bb0e09f3b1e2c8b3adcfacd84f6f2d402bfdcd441a98c0e9643c"}, + {file = "hiredis-2.2.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:14dfccf4696d75395c587a5dafafb4f7aa0a5d55309341d10bc2e7f1eaa20771"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2ddc573809ca4374da1b24b48604f34f3d5f0911fcccfb1c403ff8d8ca31c232"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:24301ca2bf9b2f843b4c3015c90f161798fa3bbc5b95fd494785751b137dbbe2"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:b083a69e158138ffa95740ff6984d328259387b5596908021b3ccb946469ff66"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:8e16dc949cc2e9c5fbcd08de05b5fb61b89ff65738d772863c5c96248628830e"}, + {file = "hiredis-2.2.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:674f296c3c89cb53f97aa9ba2508d3f360ad481b9e0c0e3a59b342a15192adaf"}, + {file = "hiredis-2.2.2-cp38-cp38-win32.whl", hash = "sha256:20ecbf87aac4f0f33f9c55ae15cb73b485d256c57518c590b7d0c9c152150632"}, + {file = "hiredis-2.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:b11960237a3025bf248135e5b497dc4923e83d137eb798fbfe78b40d57c4b156"}, + {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:18103090b8eda9c529830e26594e88b0b1472055785f3ed29b8adc694d03862a"}, + {file = "hiredis-2.2.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:d1acb7c957e5343303b3862947df3232dc7395da320b3b9ae076dfaa56ad59dc"}, + {file = "hiredis-2.2.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4997f55e1208af95a8fbd0fa187b04c672fcec8f66e49b9ab7fcc45cc1657dc4"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:449e18506d22af40977abd0f5a8979f57f88d4562fe591478a3438d76a15133d"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a32a4474f7a4abdea954f3365608edee3f90f1de9fa05b81d214d4cad04c718a"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e86c800c6941698777fc58419216a66a7f76504f1cea72381d2ee206888e964d"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c73aa295c5369135247ff63aa1fbb116067485d0506cd787cc0c868e72bbee55"}, + {file = "hiredis-2.2.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e10a66680023bd5c5a3d605dae0844e3dde60eac5b79e39f51395a2aceaf634"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:03ab760fc96e0c5d36226eb727f30645bf6a53c97f14bfc0a4d0401bfc9b8af7"}, + {file = 
"hiredis-2.2.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:855d258e7f1aee3d7fbd5b1dc87790b1b5016e23d369a97b934a25ae7bc0171f"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ccc33d87866d213f84f857a98f69c13f94fbf99a3304e328869890c9e49c8d65"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:339af17bb9817f8acb127247c79a99cad63db6738c0fb2aec9fa3d4f35d2a250"}, + {file = "hiredis-2.2.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:57f73aa04d0b70ff436fb35fa7ea2b796aa7addbd7ebb8d1aa1f3d1b3e4439f1"}, + {file = "hiredis-2.2.2-cp39-cp39-win32.whl", hash = "sha256:e97d4e650b8d933a1229f341db92b610fc52b8d752490235977b63b81fbbc2cb"}, + {file = "hiredis-2.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:8d43a7bba66a800279e33229a206861be09c279e261eaa8db4824e59465f4848"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632d79fd02b03e8d9fbaebbe40bfe34b920c5d0a9c0ef6270752e0db85208175"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a5fefac31c84143782ec1ebc323c04e733a6e4bfebcef9907a34e47a465e648"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5155bc1710df8e21aa48c9b2f4d4e13e4987e1efff363a1ef9c84fae2cc6c145"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f220b71235d2deab1b4b22681c8aee444720d973b80f1b86a4e2a85f6bcf1e1"}, + {file = "hiredis-2.2.2-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:f1f1efbe9cc29a3af39cf7eed27225f951aed3f48a1149c7fb74529fb5ab86d4"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:1f1c44242c18b1f02e6d1162f133d65d00e09cc10d9165dccc78662def72abc2"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e0f444d9062f7e487ef42bab2fb2e290f1704afcbca48ad3ec23de63eef0fda"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac15e7e1efca51b4695e540c80c328accb352c9608da7c2df82d1fa1a3c539ef"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:20cfbc469400669a5999aa34ccba3872a1e34490ec3d5c84e8c0752c27977b7c"}, + {file = "hiredis-2.2.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:bae004a0b978bf62e38d0eef5ab9156f8101d01167b3ca7054bd0994b773e917"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:a1ce725542133dbdda9e8704867ef52651886bd1ef568c6fd997a27404381985"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e6ea7532221c97fa6d79f7d19d452cd9d1141d759c54279cc4774ce24728f13"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7114961ed78d708142f6c6eb1d2ed65dc3da4b5ae8a4660ad889dd7fc891971"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1b084fbc3e69f99865242f8e1ccd4ea2a34bf6a3983d015d61133377526c0ce2"}, + {file = "hiredis-2.2.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2d1ba0799f3487294f72b2157944d5c3a4fb33c99e2d495d63eab98c7ec7234b"}, + {file = "hiredis-2.2.2.tar.gz", hash = "sha256:9c270bd0567a9c60673284e000132f603bb4ecbcd707567647a68f85ef45c4d4"}, ] [[package]] From d4eba4409f984307485b10e6b77d5edb4d93f440 Mon 
Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 13 Mar 2023 12:12:02 +0000 Subject: [PATCH 307/344] Install rust during Stage 0 of docker build (#15239) * Install rust during Stage 0 of docker build Thanks to @atomdmac for spotting the fix. Fixes #15179. * Changelog --- changelog.d/15239.docker | 1 + docker/Dockerfile | 17 ++++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15239.docker diff --git a/changelog.d/15239.docker b/changelog.d/15239.docker new file mode 100644 index 000000000000..2aad329e99b3 --- /dev/null +++ b/changelog.d/15239.docker @@ -0,0 +1 @@ +Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. diff --git a/docker/Dockerfile b/docker/Dockerfile index a85fd3d691c8..3d07bcd71f06 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -37,9 +37,24 @@ RUN \ --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update -qq && apt-get install -yqq \ - build-essential git libffi-dev libssl-dev \ + build-essential curl git libffi-dev libssl-dev \ && rm -rf /var/lib/apt/lists/* +# Install rust and ensure its in the PATH. +# (Rust may be needed to compile `cryptography`---which is one of poetry's +# dependencies---on platforms that don't have a `cryptography` wheel. +ENV RUSTUP_HOME=/rust +ENV CARGO_HOME=/cargo +ENV PATH=/cargo/bin:/rust/bin:$PATH +RUN mkdir /rust /cargo + +RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable --profile minimal + +# arm64 builds consume a lot of memory if `CARGO_NET_GIT_FETCH_WITH_CLI` is not +# set to true, so we expose it as a build-arg. +ARG CARGO_NET_GIT_FETCH_WITH_CLI=false +ENV CARGO_NET_GIT_FETCH_WITH_CLI=$CARGO_NET_GIT_FETCH_WITH_CLI + # We install poetry in its own build stage to avoid its dependencies conflicting with # synapse's dependencies. RUN --mount=type=cache,target=/root/.cache/pip \ From c071cd5a0ebc2983e5576036ffef3668ba2a30cd Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 13 Mar 2023 12:31:19 +0000 Subject: [PATCH 308/344] Ensure fed-sender catchup does not block for full state (#15248) * Reproduce bad scenario in test * Avoid catchup optimisation for partial state rooms --- changelog.d/15248.bugfix | 1 + .../sender/per_destination_queue.py | 9 +- tests/federation/test_federation_catch_up.py | 87 ++++++++++++++++++- tests/test_utils/event_injection.py | 31 +++++++ 4 files changed, 125 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15248.bugfix diff --git a/changelog.d/15248.bugfix b/changelog.d/15248.bugfix new file mode 100644 index 000000000000..8665acb49380 --- /dev/null +++ b/changelog.d/15248.bugfix @@ -0,0 +1 @@ +Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index 478187ce449a..31c5c2b7dee2 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -497,8 +497,8 @@ async def _catch_up_transmission_loop(self) -> None: # # Note: `catchup_pdus` will have exactly one PDU per room. for pdu in catchup_pdus: - # The PDU from the DB will be the last PDU in the room from - # *this server* that wasn't sent to the remote. 
However, other + # The PDU from the DB will be the newest PDU in the room from + # *this server* that we tried---but were unable---to send to the remote. # servers may have sent lots of events since then, and we want # to try and tell the remote only about the *latest* events in # the room. This is so that it doesn't get inundated by events @@ -516,6 +516,11 @@ async def _catch_up_transmission_loop(self) -> None: # If the event is in the extremities, then great! We can just # use that without having to do further checks. room_catchup_pdus = [pdu] + elif await self._store.is_partial_state_room(pdu.room_id): + # We can't be sure which events the destination should + # see using only partial state. Avoid doing so, and just retry + # sending our the newest PDU the remote is missing from us. + room_catchup_pdus = [pdu] else: # If not, fetch the extremities and figure out which we can # send. diff --git a/tests/federation/test_federation_catch_up.py b/tests/federation/test_federation_catch_up.py index 6381583c24b4..391ae5170729 100644 --- a/tests/federation/test_federation_catch_up.py +++ b/tests/federation/test_federation_catch_up.py @@ -1,4 +1,5 @@ -from typing import Callable, List, Optional, Tuple +from typing import Callable, Collection, List, Optional, Tuple +from unittest import mock from unittest.mock import Mock from twisted.test.proto_helpers import MemoryReactor @@ -500,3 +501,87 @@ def test_not_latest_event(self) -> None: self.assertEqual(len(sent_pdus), 1) self.assertEqual(sent_pdus[0].event_id, event_2.event_id) self.assertFalse(per_dest_queue._catching_up) + + def test_catch_up_is_not_blocked_by_remote_event_in_partial_state_room( + self, + ) -> None: + """Detects (part of?) https://github.com/matrix-org/synapse/issues/15220.""" + # ARRANGE: + # - a local user (u1) + # - a room which contains u1 and two remote users, @u2:host2 and @u3:other + # - events in that room such that + # - history visibility is restricted + # - u1 sent message events e1 and e2 + # - afterwards, u3 sent a remote event e3 + # - catchup to begin for host2; last successfully sent event was e1 + per_dest_queue, sent_pdus = self.make_fake_destination_queue() + + self.register_user("u1", "you the one") + u1_token = self.login("u1", "you the one") + room = self.helper.create_room_as("u1", tok=u1_token) + self.helper.send_state( + room_id=room, + event_type="m.room.history_visibility", + body={"history_visibility": "joined"}, + tok=u1_token, + ) + self.get_success( + event_injection.inject_member_event(self.hs, room, "@u2:host2", "join") + ) + self.get_success( + event_injection.inject_member_event(self.hs, room, "@u3:other", "join") + ) + + # create some events + event_id_1 = self.helper.send(room, "hello", tok=u1_token)["event_id"] + event_id_2 = self.helper.send(room, "world", tok=u1_token)["event_id"] + # pretend that u3 changes their displayname + event_id_3 = self.get_success( + event_injection.inject_member_event(self.hs, room, "@u3:other", "join") + ).event_id + + # destination_rooms should already be populated, but let us pretend that we already + # sent (successfully) up to and including event id 1 + event_1 = self.get_success(self.hs.get_datastores().main.get_event(event_id_1)) + assert event_1.internal_metadata.stream_ordering is not None + self.get_success( + self.hs.get_datastores().main.set_destination_last_successful_stream_ordering( + "host2", event_1.internal_metadata.stream_ordering + ) + ) + + # also fetch event 2 so we can compare its stream ordering to the sender's + # 
last_successful_stream_ordering later + event_2 = self.get_success(self.hs.get_datastores().main.get_event(event_id_2)) + + # Mock event 3 as having partial state + self.get_success( + event_injection.mark_event_as_partial_state(self.hs, event_id_3, room) + ) + + # Fail the test if we block on full state for event 3. + async def mock_await_full_state(event_ids: Collection[str]) -> None: + if event_id_3 in event_ids: + raise AssertionError("Tried to await full state for event_id_3") + + # ACT + with mock.patch.object( + self.hs.get_storage_controllers().state._partial_state_events_tracker, + "await_full_state", + mock_await_full_state, + ): + self.get_success(per_dest_queue._catch_up_transmission_loop()) + + # ASSERT + # We should have: + # - not sent event 3: it's not ours, and the room is partial stated + # - fallen back to sending event 2: it's the most recent event in the room + # we tried to send to host2 + # - completed catch-up + self.assertEqual(len(sent_pdus), 1) + self.assertEqual(sent_pdus[0].event_id, event_id_2) + self.assertFalse(per_dest_queue._catching_up) + self.assertEqual( + per_dest_queue._last_successful_stream_ordering, + event_2.internal_metadata.stream_ordering, + ) diff --git a/tests/test_utils/event_injection.py b/tests/test_utils/event_injection.py index a6330ed840f0..9679904c3321 100644 --- a/tests/test_utils/event_injection.py +++ b/tests/test_utils/event_injection.py @@ -102,3 +102,34 @@ async def create_event( context = await unpersisted_context.persist(event) return event, context + + +async def mark_event_as_partial_state( + hs: synapse.server.HomeServer, + event_id: str, + room_id: str, +) -> None: + """ + (Falsely) mark an event as having partial state. + + Naughty, but occasionally useful when checking that partial state doesn't + block something from happening. + + If the event already has partial state, this insert will fail (event_id is unique + in this table). + """ + store = hs.get_datastores().main + await store.db_pool.simple_upsert( + table="partial_state_rooms", + keyvalues={"room_id": room_id}, + values={}, + insertion_values={"room_id": room_id}, + ) + + await store.db_pool.simple_insert( + table="partial_state_events", + values={ + "room_id": room_id, + "event_id": event_id, + }, + ) From edcf93817399b409b832f54b8caac0af277eba96 Mon Sep 17 00:00:00 2001 From: David Robertson Date: Mon, 13 Mar 2023 12:57:56 +0000 Subject: [PATCH 309/344] 1.79.0rc2 --- CHANGES.md | 16 ++++++++++++++++ changelog.d/15227.bugfix | 1 - changelog.d/15240.misc | 1 - changelog.d/15248.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 6 files changed, 23 insertions(+), 4 deletions(-) delete mode 100644 changelog.d/15227.bugfix delete mode 100644 changelog.d/15240.misc delete mode 100644 changelog.d/15248.bugfix diff --git a/CHANGES.md b/CHANGES.md index 2357be33fb25..43259f323cf7 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,19 @@ +Synapse 1.79.0rc2 (2023-03-13) +============================== + +Bugfixes +-------- + +- Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. ([\#15227](https://github.com/matrix-org/synapse/issues/15227)) +- Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. ([\#15248](https://github.com/matrix-org/synapse/issues/15248)) + + +Internal Changes +---------------- + +- Refactor `filter_events_for_server`. 
([\#15240](https://github.com/matrix-org/synapse/issues/15240)) + + Synapse 1.79.0rc1 (2023-03-07) ============================== diff --git a/changelog.d/15227.bugfix b/changelog.d/15227.bugfix deleted file mode 100644 index eaa26c8f7fa3..000000000000 --- a/changelog.d/15227.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.79.0rc1 where attempting to register a `on_remove_user_third_party_identifier` module API callback would be a no-op. \ No newline at end of file diff --git a/changelog.d/15240.misc b/changelog.d/15240.misc deleted file mode 100644 index 2b7edf916ec5..000000000000 --- a/changelog.d/15240.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `filter_events_for_server`. diff --git a/changelog.d/15248.bugfix b/changelog.d/15248.bugfix deleted file mode 100644 index 8665acb49380..000000000000 --- a/changelog.d/15248.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a rare bug introduced in Synapse 1.73 where events could remain unsent to other homeservers after a faster-join to a room. diff --git a/debian/changelog b/debian/changelog index 871c695f07c4..a91521f6b235 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium + + * New Synapse release 1.79.0rc2. + + -- Synapse Packaging team Mon, 13 Mar 2023 12:54:21 +0000 + matrix-synapse-py3 (1.79.0~rc1) stable; urgency=medium * New Synapse release 1.79.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 90a118741641..dbdc3c49913e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.79.0rc1" +version = "1.79.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 5e21e15f96eb194358117de7a423b4af7fd74054 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Mar 2023 14:12:03 +0000 Subject: [PATCH 310/344] Bump cryptography from 39.0.1 to 39.0.2 (#15257) * Bump cryptography from 39.0.1 to 39.0.2 Bumps [cryptography](https://github.com/pyca/cryptography) from 39.0.1 to 39.0.2. - [Release notes](https://github.com/pyca/cryptography/releases) - [Changelog](https://github.com/pyca/cryptography/blob/main/CHANGELOG.rst) - [Commits](https://github.com/pyca/cryptography/compare/39.0.1...39.0.2) --- updated-dependencies: - dependency-name: cryptography dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] * Changelog --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: GitHub Actions --- changelog.d/15257.misc | 1 + poetry.lock | 48 +++++++++++++++++++++--------------------- 2 files changed, 25 insertions(+), 24 deletions(-) create mode 100644 changelog.d/15257.misc diff --git a/changelog.d/15257.misc b/changelog.d/15257.misc new file mode 100644 index 000000000000..2c0932233ca2 --- /dev/null +++ b/changelog.d/15257.misc @@ -0,0 +1 @@ +Bump cryptography from 39.0.1 to 39.0.2. diff --git a/poetry.lock b/poetry.lock index 7a45975f32b0..518fa5874be9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -352,35 +352,35 @@ files = [ [[package]] name = "cryptography" -version = "39.0.1" +version = "39.0.2" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
category = "main" optional = false python-versions = ">=3.6" files = [ - {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965"}, - {file = "cryptography-39.0.1-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f"}, - {file = "cryptography-39.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106"}, - {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c"}, - {file = "cryptography-39.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4"}, - {file = "cryptography-39.0.1-cp36-abi3-win32.whl", hash = "sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8"}, - {file = "cryptography-39.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c5caeb8188c24888c90b5108a441c106f7faa4c4c075a2bcae438c6e8ca73cef"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:4789d1e3e257965e960232345002262ede4d094d1a19f4d3b52e48d4d8f3b885"}, - {file = "cryptography-39.0.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6"}, - {file = "cryptography-39.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = 
"sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a"}, - {file = "cryptography-39.0.1.tar.gz", hash = "sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695"}, + {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:2725672bb53bb92dc7b4150d233cd4b8c59615cd8288d495eaa86db00d4e5c06"}, + {file = "cryptography-39.0.2-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:23df8ca3f24699167daf3e23e51f7ba7334d504af63a94af468f468b975b7dd7"}, + {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:eb40fe69cfc6f5cdab9a5ebd022131ba21453cf7b8a7fd3631f45bbf52bed612"}, + {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc0521cce2c1d541634b19f3ac661d7a64f9555135e9d8af3980965be717fd4a"}, + {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffd394c7896ed7821a6d13b24657c6a34b6e2650bd84ae063cf11ccffa4f1a97"}, + {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:e8a0772016feeb106efd28d4a328e77dc2edae84dfbac06061319fdb669ff828"}, + {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8f35c17bd4faed2bc7797d2a66cbb4f986242ce2e30340ab832e5d99ae60e011"}, + {file = "cryptography-39.0.2-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b49a88ff802e1993b7f749b1eeb31134f03c8d5c956e3c125c75558955cda536"}, + {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:5f8c682e736513db7d04349b4f6693690170f95aac449c56f97415c6980edef5"}, + {file = "cryptography-39.0.2-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:d7d84a512a59f4412ca8549b01f94be4161c94efc598bf09d027d67826beddc0"}, + {file = "cryptography-39.0.2-cp36-abi3-win32.whl", hash = "sha256:c43ac224aabcbf83a947eeb8b17eaf1547bce3767ee2d70093b461f31729a480"}, + {file = "cryptography-39.0.2-cp36-abi3-win_amd64.whl", hash = "sha256:788b3921d763ee35dfdb04248d0e3de11e3ca8eb22e2e48fef880c42e1f3c8f9"}, + {file = "cryptography-39.0.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d15809e0dbdad486f4ad0979753518f47980020b7a34e9fc56e8be4f60702fac"}, + {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:50cadb9b2f961757e712a9737ef33d89b8190c3ea34d0fb6675e00edbe35d074"}, + {file = "cryptography-39.0.2-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:103e8f7155f3ce2ffa0049fe60169878d47a4364b277906386f8de21c9234aa1"}, + {file = "cryptography-39.0.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:6236a9610c912b129610eb1a274bdc1350b5df834d124fa84729ebeaf7da42c3"}, + {file = "cryptography-39.0.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e944fe07b6f229f4c1a06a7ef906a19652bdd9fd54c761b0ff87e83ae7a30354"}, + {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:35d658536b0a4117c885728d1a7032bdc9a5974722ae298d6c533755a6ee3915"}, + {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:30b1d1bfd00f6fc80d11300a29f1d8ab2b8d9febb6ed4a38a76880ec564fae84"}, + {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:e029b844c21116564b8b61216befabca4b500e6816fa9f0ba49527653cae2108"}, + {file = "cryptography-39.0.2-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fa507318e427169ade4e9eccef39e9011cdc19534f55ca2f36ec3f388c1f70f3"}, + 
{file = "cryptography-39.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:8bc0008ef798231fac03fe7d26e82d601d15bd16f3afaad1c6113771566570f3"}, + {file = "cryptography-39.0.2.tar.gz", hash = "sha256:bc5b871e977c8ee5a1bbc42fa8d19bcc08baf0c51cbf1586b0e87a2694dde42f"}, ] [package.dependencies] From e7b559d2ca3d8ea11c32946bd1607078dc2873f8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Tue, 14 Mar 2023 08:18:49 -0400 Subject: [PATCH 311/344] Avoid unneeded work if auto-join rooms aren't configured. (#15262) It is not necessary to reach out to the database to check some parameters if the auto-join rooms are not configured, or (in some cases) if auto-create rooms is not configured. --- changelog.d/15262.misc | 1 + synapse/handlers/register.py | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15262.misc diff --git a/changelog.d/15262.misc b/changelog.d/15262.misc new file mode 100644 index 000000000000..d519f151c454 --- /dev/null +++ b/changelog.d/15262.misc @@ -0,0 +1 @@ +Skip processing of auto-join room behaviour if there are not auto-join rooms configured. diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index e4e506e62c5e..6b110dcb6e4d 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -596,14 +596,20 @@ async def _auto_join_rooms(self, user_id: str) -> None: Args: user_id: The user to join """ + # If there are no rooms to auto-join, just bail. + if not self.hs.config.registration.auto_join_rooms: + return + # auto-join the user to any rooms we're supposed to dump them into # try to create the room if we're the first real user on the server. Note # that an auto-generated support or bot user is not a real user and will never be # the user to create the room should_auto_create_rooms = False - is_real_user = await self.store.is_real_user(user_id) - if self.hs.config.registration.autocreate_auto_join_rooms and is_real_user: + if ( + self.hs.config.registration.autocreate_auto_join_rooms + and await self.store.is_real_user(user_id) + ): count = await self.store.count_real_users() should_auto_create_rooms = count == 1 From 8b1af08c6ed141101d127bcf9909f23b2b621510 Mon Sep 17 00:00:00 2001 From: Mathieu Velten Date: Tue, 14 Mar 2023 16:15:01 +0100 Subject: [PATCH 312/344] 1.79.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 43259f323cf7..158ef035495e 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.79.0 (2023-03-14) +=========================== + +No significant changes since 1.79.0rc2. + + Synapse 1.79.0rc2 (2023-03-13) ============================== diff --git a/debian/changelog b/debian/changelog index a91521f6b235..10f4815d351d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.79.0) stable; urgency=medium + + * New Synapse release 1.79.0. + + -- Synapse Packaging team Tue, 14 Mar 2023 16:14:50 +0100 + matrix-synapse-py3 (1.79.0~rc2) stable; urgency=medium * New Synapse release 1.79.0rc2. 
diff --git a/pyproject.toml b/pyproject.toml index dbdc3c49913e..4dc228ee6cab 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.79.0rc2" +version = "1.79.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 003a25ae5c8f1d708964f18aa34b87fb08ca5ef4 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 14 Mar 2023 11:29:33 -0500 Subject: [PATCH 313/344] Additional functionality for declaring worker types in Complement (#14921) Co-authored-by: Olivier Wilkinson (reivilibre) --- changelog.d/14921.misc | 1 + .../complement/conf/start_for_complement.sh | 6 +- docker/configure_workers_and_start.py | 521 ++++++++++++++---- 3 files changed, 413 insertions(+), 115 deletions(-) create mode 100644 changelog.d/14921.misc diff --git a/changelog.d/14921.misc b/changelog.d/14921.misc new file mode 100644 index 000000000000..599e23eb0c9f --- /dev/null +++ b/changelog.d/14921.misc @@ -0,0 +1 @@ +Add additional functionality to declaring worker types when starting Complement in worker mode. diff --git a/docker/complement/conf/start_for_complement.sh b/docker/complement/conf/start_for_complement.sh index af13209c54e9..5560ab8b95a8 100755 --- a/docker/complement/conf/start_for_complement.sh +++ b/docker/complement/conf/start_for_complement.sh @@ -51,8 +51,7 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then # -z True if the length of string is zero. if [[ -z "$SYNAPSE_WORKER_TYPES" ]]; then export SYNAPSE_WORKER_TYPES="\ - event_persister, \ - event_persister, \ + event_persister:2, \ background_worker, \ frontend_proxy, \ event_creator, \ @@ -64,7 +63,8 @@ if [[ -n "$SYNAPSE_COMPLEMENT_USE_WORKERS" ]]; then synchrotron, \ client_reader, \ appservice, \ - pusher" + pusher, \ + stream_writers=account_data+presence+receipts+to_device+typing" fi log "Workers requested: $SYNAPSE_WORKER_TYPES" diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index add8bb1ff61e..cfb16c2e2240 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -19,8 +19,15 @@ # The environment variables it reads are: # * SYNAPSE_SERVER_NAME: The desired server_name of the homeserver. # * SYNAPSE_REPORT_STATS: Whether to report stats. -# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKER_CONFIG -# below. Leave empty for no workers. +# * SYNAPSE_WORKER_TYPES: A comma separated list of worker names as specified in WORKERS_CONFIG +# below. Leave empty for no workers. Add a ':' and a number at the end to +# multiply that worker. Append multiple worker types with '+' to merge the +# worker types into a single worker. Add a name and a '=' to the front of a +# worker type to give this instance a name in logs and nginx. +# Examples: +# SYNAPSE_WORKER_TYPES='event_persister, federation_sender, client_reader' +# SYNAPSE_WORKER_TYPES='event_persister:2, federation_sender:2, client_reader' +# SYNAPSE_WORKER_TYPES='stream_writers=account_data+presence+typing' # * SYNAPSE_AS_REGISTRATION_DIR: If specified, a directory in which .yaml and .yml files # will be treated as Application Service registration files. # * SYNAPSE_TLS_CERT: Path to a TLS certificate in PEM format. 
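The header comment added in the hunk above documents the new `SYNAPSE_WORKER_TYPES` grammar: a comma-separated list in which `:N` multiplies a worker, `+` merges several worker types into a single instance, and a leading `name=` assigns an explicit instance name. As a rough illustration of how such a string expands, here is a standalone sketch; `expand_worker_types` is an invented helper for this example, not the parsing code the script itself uses.

# Standalone sketch: expand a SYNAPSE_WORKER_TYPES string following the rules
# documented in the header comment above. `expand_worker_types` is an invented
# helper for illustration, not the script's real parser.
from typing import List, Tuple


def expand_worker_types(raw: str) -> List[Tuple[str, List[str]]]:
    """Return one (instance_name, worker_types) pair per worker to create.

    instance_name is "" when no explicit 'name=' prefix was given.
    """
    expanded: List[Tuple[str, List[str]]] = []
    for entry in raw.split(","):
        entry = entry.strip()
        if not entry:
            continue

        # Optional explicit instance name, e.g. "stream_writers=account_data+typing".
        name, _, rest = entry.rpartition("=")

        # Optional ":N" multiplier at the end of the entry; the real script
        # rejects non-integers and treats ":0" as "create none".
        count = 1
        if ":" in rest:
            rest, _, count_str = rest.partition(":")
            count = int(count_str)

        # "+" merges several worker types into a single worker instance.
        types = [t.strip() for t in rest.split("+")]

        expanded.extend((name, types) for _ in range(count))
    return expanded


print(expand_worker_types("event_persister:2, client_reader"))
# [('', ['event_persister']), ('', ['event_persister']), ('', ['client_reader'])]
print(expand_worker_types("stream_writers=account_data+presence+typing"))
# [('stream_writers', ['account_data', 'presence', 'typing'])]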
@@ -40,16 +47,33 @@ import os import platform +import re import subprocess import sys +from collections import defaultdict +from itertools import chain from pathlib import Path -from typing import Any, Dict, List, Mapping, MutableMapping, NoReturn, Optional, Set +from typing import ( + Any, + Dict, + List, + Mapping, + MutableMapping, + NoReturn, + Optional, + Set, + SupportsIndex, +) import yaml from jinja2 import Environment, FileSystemLoader MAIN_PROCESS_HTTP_LISTENER_PORT = 8080 +# A simple name used as a placeholder in the WORKERS_CONFIG below. This will be replaced +# during processing with the name of the worker. +WORKER_PLACEHOLDER_NAME = "placeholder_name" + # Workers with exposed endpoints needs either "client", "federation", or "media" listener_resources # Watching /_matrix/client needs a "client" listener # Watching /_matrix/federation needs a "federation" listener @@ -70,11 +94,13 @@ "endpoint_patterns": [ "^/_matrix/client/(api/v1|r0|v3|unstable)/user_directory/search$" ], - "shared_extra_conf": {"update_user_directory_from_worker": "user_dir1"}, + "shared_extra_conf": { + "update_user_directory_from_worker": WORKER_PLACEHOLDER_NAME + }, "worker_extra_conf": "", }, "media_repository": { - "app": "synapse.app.media_repository", + "app": "synapse.app.generic_worker", "listener_resources": ["media"], "endpoint_patterns": [ "^/_matrix/media/", @@ -87,7 +113,7 @@ # The first configured media worker will run the media background jobs "shared_extra_conf": { "enable_media_repo": False, - "media_instance_running_background_jobs": "media_repository1", + "media_instance_running_background_jobs": WORKER_PLACEHOLDER_NAME, }, "worker_extra_conf": "enable_media_repo: true", }, @@ -95,7 +121,9 @@ "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - "shared_extra_conf": {"notify_appservices_from_worker": "appservice1"}, + "shared_extra_conf": { + "notify_appservices_from_worker": WORKER_PLACEHOLDER_NAME + }, "worker_extra_conf": "", }, "federation_sender": { @@ -192,9 +220,9 @@ "app": "synapse.app.generic_worker", "listener_resources": [], "endpoint_patterns": [], - # This worker cannot be sharded. Therefore there should only ever be one background - # worker, and it should be named background_worker1 - "shared_extra_conf": {"run_background_tasks_on": "background_worker1"}, + # This worker cannot be sharded. Therefore, there should only ever be one + # background worker. This is enforced for the safety of your database. + "shared_extra_conf": {"run_background_tasks_on": WORKER_PLACEHOLDER_NAME}, "worker_extra_conf": "", }, "event_creator": { @@ -275,7 +303,7 @@ """ NGINX_UPSTREAM_CONFIG_BLOCK = """ -upstream {upstream_worker_type} {{ +upstream {upstream_worker_base_name} {{ {body} }} """ @@ -326,7 +354,7 @@ def convert(src: str, dst: str, **template_vars: object) -> None: def add_worker_roles_to_shared_config( shared_config: dict, - worker_type: str, + worker_types_set: Set[str], worker_name: str, worker_port: int, ) -> None: @@ -334,22 +362,36 @@ def add_worker_roles_to_shared_config( append appropriate worker information to it for the current worker_type instance. Args: - shared_config: The config dict that all worker instances share (after being converted to YAML) - worker_type: The type of worker (one of those defined in WORKERS_CONFIG). + shared_config: The config dict that all worker instances share (after being + converted to YAML) + worker_types_set: The type of worker (one of those defined in WORKERS_CONFIG). 
+ This list can be a single worker type or multiple. worker_name: The name of the worker instance. worker_port: The HTTP replication port that the worker instance is listening on. """ - # The instance_map config field marks the workers that write to various replication streams + # The instance_map config field marks the workers that write to various replication + # streams instance_map = shared_config.setdefault("instance_map", {}) - # Worker-type specific sharding config - if worker_type == "pusher": + # This is a list of the stream_writers that there can be only one of. Events can be + # sharded, and therefore doesn't belong here. + singular_stream_writers = [ + "account_data", + "presence", + "receipts", + "to_device", + "typing", + ] + + # Worker-type specific sharding config. Now a single worker can fulfill multiple + # roles, check each. + if "pusher" in worker_types_set: shared_config.setdefault("pusher_instances", []).append(worker_name) - elif worker_type == "federation_sender": + if "federation_sender" in worker_types_set: shared_config.setdefault("federation_sender_instances", []).append(worker_name) - elif worker_type == "event_persister": + if "event_persister" in worker_types_set: # Event persisters write to the events stream, so we need to update # the list of event stream writers shared_config.setdefault("stream_writers", {}).setdefault("events", []).append( @@ -362,19 +404,154 @@ def add_worker_roles_to_shared_config( "port": worker_port, } - elif worker_type in ["account_data", "presence", "receipts", "to_device", "typing"]: - # Update the list of stream writers - # It's convenient that the name of the worker type is the same as the stream to write - shared_config.setdefault("stream_writers", {}).setdefault( - worker_type, [] - ).append(worker_name) + # Update the list of stream writers. It's convenient that the name of the worker + # type is the same as the stream to write. Iterate over the whole list in case there + # is more than one. + for worker in worker_types_set: + if worker in singular_stream_writers: + shared_config.setdefault("stream_writers", {}).setdefault( + worker, [] + ).append(worker_name) + + # Map of stream writer instance names to host/ports combos + # For now, all stream writers need http replication ports + instance_map[worker_name] = { + "host": "localhost", + "port": worker_port, + } + + +def merge_worker_template_configs( + existing_dict: Dict[str, Any] | None, + to_be_merged_dict: Dict[str, Any], +) -> Dict[str, Any]: + """When given an existing dict of worker template configuration consisting with both + dicts and lists, merge new template data from WORKERS_CONFIG(or create) and + return new dict. - # Map of stream writer instance names to host/ports combos - # For now, all stream writers need http replication ports - instance_map[worker_name] = { - "host": "localhost", - "port": worker_port, - } + Args: + existing_dict: Either an existing worker template or a fresh blank one. + to_be_merged_dict: The template from WORKERS_CONFIGS to be merged into + existing_dict. + Returns: The newly merged together dict values. 
+ """ + new_dict: Dict[str, Any] = {} + if not existing_dict: + # It doesn't exist yet, just use the new dict(but take a copy not a reference) + new_dict = to_be_merged_dict.copy() + else: + for i in to_be_merged_dict.keys(): + if (i == "endpoint_patterns") or (i == "listener_resources"): + # merge the two lists, remove duplicates + new_dict[i] = list(set(existing_dict[i] + to_be_merged_dict[i])) + elif i == "shared_extra_conf": + # merge dictionary's, the worker name will be replaced later + new_dict[i] = {**existing_dict[i], **to_be_merged_dict[i]} + elif i == "worker_extra_conf": + # There is only one worker type that has a 'worker_extra_conf' and it is + # the media_repo. Since duplicate worker types on the same worker don't + # work, this is fine. + new_dict[i] = existing_dict[i] + to_be_merged_dict[i] + else: + # Everything else should be identical, like "app", which only works + # because all apps are now generic_workers. + new_dict[i] = to_be_merged_dict[i] + return new_dict + + +def insert_worker_name_for_worker_config( + existing_dict: Dict[str, Any], worker_name: str +) -> Dict[str, Any]: + """Insert a given worker name into the worker's configuration dict. + + Args: + existing_dict: The worker_config dict that is imported into shared_config. + worker_name: The name of the worker to insert. + Returns: Copy of the dict with newly inserted worker name + """ + dict_to_edit = existing_dict.copy() + for k, v in dict_to_edit["shared_extra_conf"].items(): + # Only proceed if it's the placeholder name string + if v == WORKER_PLACEHOLDER_NAME: + dict_to_edit["shared_extra_conf"][k] = worker_name + return dict_to_edit + + +def apply_requested_multiplier_for_worker(worker_types: List[str]) -> List[str]: + """ + Apply multiplier(if found) by returning a new expanded list with some basic error + checking. + + Args: + worker_types: The unprocessed List of requested workers + Returns: + A new list with all requested workers expanded. + """ + # Checking performed: + # 1. if worker:2 or more is declared, it will create additional workers up to number + # 2. if worker:1, it will create a single copy of this worker as if no number was + # given + # 3. if worker:0 is declared, this worker will be ignored. This is to allow for + # scripting and automated expansion and is intended behaviour. + # 4. if worker:NaN or is a negative number, it will error and log it. + new_worker_types = [] + for worker_type in worker_types: + if ":" in worker_type: + worker_type_components = split_and_strip_string(worker_type, ":", 1) + worker_count = 0 + # Should only be 2 components, a type of worker(s) and an integer as a + # string. Cast the number as an int then it can be used as a counter. + try: + worker_count = int(worker_type_components[1]) + except ValueError: + error( + f"Bad number in worker count for '{worker_type}': " + f"'{worker_type_components[1]}' is not an integer" + ) + + # As long as there are more than 0, we add one to the list to make below. + for _ in range(worker_count): + new_worker_types.append(worker_type_components[0]) + + else: + # If it's not a real worker_type, it will error out later. + new_worker_types.append(worker_type) + return new_worker_types + + +def is_sharding_allowed_for_worker_type(worker_type: str) -> bool: + """Helper to check to make sure worker types that cannot have multiples do not. + + Args: + worker_type: The type of worker to check against. 
+ Returns: True if allowed, False if not + """ + return worker_type not in [ + "background_worker", + "account_data", + "presence", + "receipts", + "typing", + "to_device", + ] + + +def split_and_strip_string( + given_string: str, split_char: str, max_split: SupportsIndex = -1 +) -> List[str]: + """ + Helper to split a string on split_char and strip whitespace from each end of each + element. + Args: + given_string: The string to split + split_char: The character to split the string on + max_split: kwarg for split() to limit how many times the split() happens + Returns: + A List of strings + """ + # Removes whitespace from ends of result strings before adding to list. Allow for + # overriding 'maxsplit' kwarg, default being -1 to signify no maximum. + return [x.strip() for x in given_string.split(split_char, maxsplit=max_split)] def generate_base_homeserver_config() -> None: @@ -389,29 +566,153 @@ def generate_base_homeserver_config() -> None: subprocess.run(["/usr/local/bin/python", "/start.py", "migrate_config"], check=True) +def parse_worker_types( + requested_worker_types: List[str], +) -> Dict[str, Set[str]]: + """Read the desired list of requested workers and prepare the data for use in + generating worker config files while also checking for potential gotchas. + + Args: + requested_worker_types: The list formed from the split environment variable + containing the unprocessed requests for workers. + + Returns: A dict of worker names to set of worker types. Format: + {'worker_name': + {'worker_type', 'worker_type2'} + } + """ + # A counter of worker_base_name -> int. Used for determining the name for a given + # worker when generating its config file, as each worker's name is just + # worker_base_name followed by instance number + worker_base_name_counter: Dict[str, int] = defaultdict(int) + + # Similar to above, but more finely grained. This is used to determine we don't have + # more than a single worker for cases where multiples would be bad(e.g. presence). + worker_type_shard_counter: Dict[str, int] = defaultdict(int) + + # The final result of all this processing + dict_to_return: Dict[str, Set[str]] = {} + + # Handle any multipliers requested for given workers. + multiple_processed_worker_types = apply_requested_multiplier_for_worker( + requested_worker_types + ) + + # Process each worker_type_string + # Examples of expected formats: + # - requested_name=type1+type2+type3 + # - synchrotron + # - event_creator+event_persister + for worker_type_string in multiple_processed_worker_types: + # First, if a name is requested, use that — otherwise generate one. + worker_base_name: str = "" + if "=" in worker_type_string: + # Split on "=", remove extra whitespace from ends then make list + worker_type_split = split_and_strip_string(worker_type_string, "=") + if len(worker_type_split) > 2: + error( + "There should only be one '=' in the worker type string. " + f"Please fix: {worker_type_string}" + ) + + # Assign the name + worker_base_name = worker_type_split[0] + + if not re.match(r"^[a-zA-Z0-9_+-]*[a-zA-Z_+-]$", worker_base_name): + # Apply a fairly narrow regex to the worker names. Some characters + # aren't safe for use in file paths or nginx configurations. + # Don't allow to end with a number because we'll add a number + # ourselves in a moment. 
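+                # (Editorial illustration, not in the original patch: names such as
+                # 'stream_writers' or 'my-workers' satisfy this pattern, while a name
+                # like 'workers1' is rejected because it ends in a digit and would
+                # collide with the instance number appended below.)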
+ error( + "Invalid worker name; please choose a name consisting of " + "alphanumeric letters, _ + -, but not ending with a digit: " + f"{worker_base_name!r}" + ) + + # Continue processing the remainder of the worker_type string + # with the name override removed. + worker_type_string = worker_type_split[1] + + # Split the worker_type_string on "+", remove whitespace from ends then make + # the list a set so it's deduplicated. + worker_types_set: Set[str] = set( + split_and_strip_string(worker_type_string, "+") + ) + + if not worker_base_name: + # No base name specified: generate one deterministically from set of + # types + worker_base_name = "+".join(sorted(worker_types_set)) + + # At this point, we have: + # worker_base_name which is the name for the worker, without counter. + # worker_types_set which is the set of worker types for this worker. + + # Validate worker_type and make sure we don't allow sharding for a worker type + # that doesn't support it. Will error and stop if it is a problem, + # e.g. 'background_worker'. + for worker_type in worker_types_set: + # Verify this is a real defined worker type. If it's not, stop everything so + # it can be fixed. + if worker_type not in WORKERS_CONFIG: + error( + f"{worker_type} is an unknown worker type! Was found in " + f"'{worker_type_string}'. Please fix!" + ) + + if worker_type in worker_type_shard_counter: + if not is_sharding_allowed_for_worker_type(worker_type): + error( + f"There can be only a single worker with {worker_type} " + "type. Please recount and remove." + ) + # Not in shard counter, must not have seen it yet, add it. + worker_type_shard_counter[worker_type] += 1 + + # Generate the number for the worker using incrementing counter + worker_base_name_counter[worker_base_name] += 1 + worker_number = worker_base_name_counter[worker_base_name] + worker_name = f"{worker_base_name}{worker_number}" + + if worker_number > 1: + # If this isn't the first worker, check that we don't have a confusing + # mixture of worker types with the same base name. + first_worker_with_base_name = dict_to_return[f"{worker_base_name}1"] + if first_worker_with_base_name != worker_types_set: + error( + f"Can not use worker_name: '{worker_name}' for worker_type(s): " + f"{worker_types_set!r}. It is already in use by " + f"worker_type(s): {first_worker_with_base_name!r}" + ) + + dict_to_return[worker_name] = worker_types_set + + return dict_to_return + + def generate_worker_files( - environ: Mapping[str, str], config_path: str, data_dir: str + environ: Mapping[str, str], + config_path: str, + data_dir: str, + requested_worker_types: Dict[str, Set[str]], ) -> None: - """Read the desired list of workers from environment variables and generate - shared homeserver, nginx and supervisord configs. + """Read the desired workers(if any) that is passed in and generate shared + homeserver, nginx and supervisord configs. Args: environ: os.environ instance. config_path: The location of the generated Synapse main worker config file. data_dir: The location of the synapse data directory. Where log and user-facing config files live. + requested_worker_types: A Dict containing requested workers in the format of + {'worker_name1': {'worker_type', ...}} """ # Note that yaml cares about indentation, so care should be taken to insert lines # into files at the correct indentation below. - # shared_config is the contents of a Synapse config file that will be shared amongst - # the main Synapse process as well as all workers. 
- # It is intended mainly for disabling functionality when certain workers are spun up, - # and adding a replication listener. - - # First read the original config file and extract the listeners block. Then we'll add - # another listener for replication. Later we'll write out the result to the shared - # config file. + # First read the original config file and extract the listeners block. Then we'll + # add another listener for replication. Later we'll write out the result to the + # shared config file. listeners = [ { "port": 9093, @@ -427,9 +728,9 @@ def generate_worker_files( listeners += original_listeners # The shared homeserver config. The contents of which will be inserted into the - # base shared worker jinja2 template. - # - # This config file will be passed to all workers, included Synapse's main process. + # base shared worker jinja2 template. This config file will be passed to all + # workers, included Synapse's main process. It is intended mainly for disabling + # functionality when certain workers are spun up, and adding a replication listener. shared_config: Dict[str, Any] = {"listeners": listeners} # List of dicts that describe workers. @@ -437,31 +738,20 @@ def generate_worker_files( # program blocks. worker_descriptors: List[Dict[str, Any]] = [] - # Upstreams for load-balancing purposes. This dict takes the form of a worker type to the - # ports of each worker. For example: + # Upstreams for load-balancing purposes. This dict takes the form of the worker + # type to the ports of each worker. For example: # { # worker_type: {1234, 1235, ...}} # } # and will be used to construct 'upstream' nginx directives. nginx_upstreams: Dict[str, Set[int]] = {} - # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be - # placed after the proxy_pass directive. The main benefit to representing this data as a - # dict over a str is that we can easily deduplicate endpoints across multiple instances - # of the same worker. - # - # An nginx site config that will be amended to depending on the workers that are - # spun up. To be placed in /etc/nginx/conf.d. - nginx_locations = {} - - # Read the desired worker configuration from the environment - worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip() - if not worker_types_env: - # No workers, just the main process - worker_types = [] - else: - # Split type names by comma, ignoring whitespace. - worker_types = [x.strip() for x in worker_types_env.split(",")] + # A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what + # will be placed after the proxy_pass directive. The main benefit to representing + # this data as a dict over a str is that we can easily deduplicate endpoints + # across multiple instances of the same worker. The final rendering will be combined + # with nginx_upstreams and placed in /etc/nginx/conf.d. + nginx_locations: Dict[str, str] = {} # Create the worker configuration directory if it doesn't already exist os.makedirs("/conf/workers", exist_ok=True) @@ -469,66 +759,57 @@ def generate_worker_files( # Start worker ports from this arbitrary port worker_port = 18009 - # A counter of worker_type -> int. Used for determining the name for a given - # worker type when generating its config file, as each worker's name is just - # worker_type + instance # - worker_type_counter: Dict[str, int] = {} - # A list of internal endpoints to healthcheck, starting with the main process # which exists even if no workers do. 
healthcheck_urls = ["http://localhost:8080/health"] - # For each worker type specified by the user, create config values - for worker_type in worker_types: - worker_config = WORKERS_CONFIG.get(worker_type) - if worker_config: - worker_config = worker_config.copy() - else: - error(worker_type + " is an unknown worker type! Please fix!") + # Get the set of all worker types that we have configured + all_worker_types_in_use = set(chain(*requested_worker_types.values())) + # Map locations to upstreams (corresponding to worker types) in Nginx + # but only if we use the appropriate worker type + for worker_type in all_worker_types_in_use: + for endpoint_pattern in WORKERS_CONFIG[worker_type]["endpoint_patterns"]: + nginx_locations[endpoint_pattern] = f"http://{worker_type}" + + # For each worker type specified by the user, create config values and write it's + # yaml config file + for worker_name, worker_types_set in requested_worker_types.items(): + # The collected and processed data will live here. + worker_config: Dict[str, Any] = {} + + # Merge all worker config templates for this worker into a single config + for worker_type in worker_types_set: + copy_of_template_config = WORKERS_CONFIG[worker_type].copy() + + # Merge worker type template configuration data. It's a combination of lists + # and dicts, so use this helper. + worker_config = merge_worker_template_configs( + worker_config, copy_of_template_config + ) + + # Replace placeholder names in the config template with the actual worker name. + worker_config = insert_worker_name_for_worker_config(worker_config, worker_name) - new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1 - worker_type_counter[worker_type] = new_worker_count - - # Name workers by their type concatenated with an incrementing number - # e.g. federation_reader1 - worker_name = worker_type + str(new_worker_count) worker_config.update( {"name": worker_name, "port": str(worker_port), "config_path": config_path} ) - # Update the shared config with any worker-type specific options - shared_config.update(worker_config["shared_extra_conf"]) + # Update the shared config with any worker_type specific options. The first of a + # given worker_type needs to stay assigned and not be replaced. 
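+        # (Editorial note, not in the original patch: updating the merged worker
+        # defaults with the existing shared_config, and then rebinding shared_config
+        # to the result, is what keeps values such as
+        # 'media_instance_running_background_jobs' pointing at the first worker of a
+        # given type even when later instances substitute their own name above.)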
+ worker_config["shared_extra_conf"].update(shared_config) + shared_config = worker_config["shared_extra_conf"] healthcheck_urls.append("http://localhost:%d/health" % (worker_port,)) - # Check if more than one instance of this worker type has been specified - worker_type_total_count = worker_types.count(worker_type) - # Update the shared config with sharding-related options if necessary add_worker_roles_to_shared_config( - shared_config, worker_type, worker_name, worker_port + shared_config, worker_types_set, worker_name, worker_port ) # Enable the worker in supervisord worker_descriptors.append(worker_config) - # Add nginx location blocks for this worker's endpoints (if any are defined) - for pattern in worker_config["endpoint_patterns"]: - # Determine whether we need to load-balance this worker - if worker_type_total_count > 1: - # Create or add to a load-balanced upstream for this worker - nginx_upstreams.setdefault(worker_type, set()).add(worker_port) - - # Upstreams are named after the worker_type - upstream = "http://" + worker_type - else: - upstream = "http://localhost:%d" % (worker_port,) - - # Note that this endpoint should proxy to this upstream - nginx_locations[pattern] = upstream - # Write out the worker's logging config file - log_config_filepath = generate_worker_log_config(environ, worker_name, data_dir) # Then a worker config file @@ -539,6 +820,10 @@ def generate_worker_files( worker_log_config_filepath=log_config_filepath, ) + # Save this worker's port number to the correct nginx upstreams + for worker_type in worker_types_set: + nginx_upstreams.setdefault(worker_type, set()).add(worker_port) + worker_port += 1 # Build the nginx location config blocks @@ -551,15 +836,14 @@ def generate_worker_files( # Determine the load-balancing upstreams to configure nginx_upstream_config = "" - - for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items(): + for upstream_worker_base_name, upstream_worker_ports in nginx_upstreams.items(): body = "" for port in upstream_worker_ports: - body += " server localhost:%d;\n" % (port,) + body += f" server localhost:{port};\n" # Add to the list of configured upstreams nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format( - upstream_worker_type=upstream_worker_type, + upstream_worker_base_name=upstream_worker_base_name, body=body, ) @@ -580,7 +864,7 @@ def generate_worker_files( if reg_path.suffix.lower() in (".yaml", ".yml") ] - workers_in_use = len(worker_types) > 0 + workers_in_use = len(requested_worker_types) > 0 # Shared homeserver config convert( @@ -678,13 +962,26 @@ def main(args: List[str], environ: MutableMapping[str, str]) -> None: generate_base_homeserver_config() else: log("Base homeserver config exists—not regenerating") - # This script may be run multiple times (mostly by Complement, see note at top of file). - # Don't re-configure workers in this instance. + # This script may be run multiple times (mostly by Complement, see note at top of + # file). Don't re-configure workers in this instance. mark_filepath = "/conf/workers_have_been_configured" if not os.path.exists(mark_filepath): + # Collect and validate worker_type requests + # Read the desired worker configuration from the environment + worker_types_env = environ.get("SYNAPSE_WORKER_TYPES", "").strip() + # Only process worker_types if they exist + if not worker_types_env: + # No workers, just the main process + worker_types = [] + requested_worker_types: Dict[str, Any] = {} + else: + # Split type names by comma, ignoring whitespace. 
+ worker_types = split_and_strip_string(worker_types_env, ",") + requested_worker_types = parse_worker_types(worker_types) + # Always regenerate all other config files log("Generating worker config files") - generate_worker_files(environ, config_path, data_dir) + generate_worker_files(environ, config_path, data_dir, requested_worker_types) # Mark workers as being configured with open(mark_filepath, "w") as f: From d0fe417f5c16db298fc56f0f5ebd32eeb0d6cf44 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 14 Mar 2023 17:32:46 +0000 Subject: [PATCH 314/344] Remove unused store method `_set_destination_retry_timings_emulated`. (#15266) --- changelog.d/15266.misc | 1 + .../storage/databases/main/transactions.py | 56 +------------------ 2 files changed, 3 insertions(+), 54 deletions(-) create mode 100644 changelog.d/15266.misc diff --git a/changelog.d/15266.misc b/changelog.d/15266.misc new file mode 100644 index 000000000000..285b72cdd159 --- /dev/null +++ b/changelog.d/15266.misc @@ -0,0 +1 @@ +Remove unused store method `_set_destination_retry_timings_emulated`. \ No newline at end of file diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 6d72bd9f67f2..c3bd36efc9c2 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -224,7 +224,7 @@ async def set_destination_retry_timings( await self.db_pool.runInteraction( "set_destination_retry_timings", - self._set_destination_retry_timings_native, + self._set_destination_retry_timings_txn, destination, failure_ts, retry_last_ts, @@ -232,7 +232,7 @@ async def set_destination_retry_timings( db_autocommit=True, # Safe as it's a single upsert ) - def _set_destination_retry_timings_native( + def _set_destination_retry_timings_txn( self, txn: LoggingTransaction, destination: str, @@ -266,58 +266,6 @@ def _set_destination_retry_timings_native( txn, self.get_destination_retry_timings, (destination,) ) - def _set_destination_retry_timings_emulated( - self, - txn: LoggingTransaction, - destination: str, - failure_ts: Optional[int], - retry_last_ts: int, - retry_interval: int, - ) -> None: - self.database_engine.lock_table(txn, "destinations") - - # We need to be careful here as the data may have changed from under us - # due to a worker setting the timings. 
- - prev_row = self.db_pool.simple_select_one_txn( - txn, - table="destinations", - keyvalues={"destination": destination}, - retcols=("failure_ts", "retry_last_ts", "retry_interval"), - allow_none=True, - ) - - if not prev_row: - self.db_pool.simple_insert_txn( - txn, - table="destinations", - values={ - "destination": destination, - "failure_ts": failure_ts, - "retry_last_ts": retry_last_ts, - "retry_interval": retry_interval, - }, - ) - elif ( - retry_interval == 0 - or prev_row["retry_interval"] is None - or prev_row["retry_interval"] < retry_interval - ): - self.db_pool.simple_update_one_txn( - txn, - "destinations", - keyvalues={"destination": destination}, - updatevalues={ - "failure_ts": failure_ts, - "retry_last_ts": retry_last_ts, - "retry_interval": retry_interval, - }, - ) - - self._invalidate_cache_and_stream( - txn, self.get_destination_retry_timings, (destination,) - ) - async def store_destination_rooms_entries( self, destinations: Iterable[str], From 63d87c08c8e9acedb64ef6ee135acda34e4e370d Mon Sep 17 00:00:00 2001 From: reivilibre Date: Wed, 15 Mar 2023 09:25:58 +0000 Subject: [PATCH 315/344] Add schema comments about the `destinations` and `destination_rooms` tables. (#15247) --- changelog.d/15247.misc | 1 + .../74/90COMMENTS_destinations.sql.postgres | 52 +++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 changelog.d/15247.misc create mode 100644 synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres diff --git a/changelog.d/15247.misc b/changelog.d/15247.misc new file mode 100644 index 000000000000..6e2ce1d4d8bd --- /dev/null +++ b/changelog.d/15247.misc @@ -0,0 +1 @@ +Add schema comments about the `destinations` and `destination_rooms` tables. \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres b/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres new file mode 100644 index 000000000000..cc7dda1a1136 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/90COMMENTS_destinations.sql.postgres @@ -0,0 +1,52 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +--- destinations +COMMENT ON TABLE destinations IS + 'Information about remote homeservers and the health of our connection to them.'; + +COMMENT ON COLUMN destinations.destination IS 'server name of remote homeserver in question'; + +COMMENT ON COLUMN destinations.last_successful_stream_ordering IS +$$Stream ordering of the most recently successfully sent PDU to this server, sent through normal send (not e.g. backfill). +In Catch-Up Mode, the original PDU persisted by us is represented here, even if we sent a later forward extremity in its stead. +See `destination_rooms` for more information about catch-up.$$; + +COMMENT ON COLUMN destinations.retry_last_ts IS +$$The last time we tried and failed to reach the remote server, in ms. 
+This field is reset to `0` when we succeed in connecting again.$$; + +COMMENT ON COLUMN destinations.retry_interval IS +$$How long, in milliseconds, to wait since the last time we tried to reach the remote server before trying again. +This field is reset to `0` when we succeed in connecting again.$$; + +COMMENT ON COLUMN destinations.failure_ts IS +$$The first time we tried and failed to reach the remote server, in ms. +This field is reset to `NULL` when we succeed in connecting again.$$; + + + +--- destination_rooms +COMMENT ON TABLE destination_rooms IS + 'Information about transmission of PDUs in a given room to a given remote homeserver.'; + +COMMENT ON COLUMN destination_rooms.destination IS 'server name of remote homeserver in question'; + +COMMENT ON COLUMN destination_rooms.room_id IS 'room ID in question'; + +COMMENT ON COLUMN destination_rooms.stream_ordering IS +$$`stream_ordering` of the most recent PDU in this room that needs to be sent (by us) to this homeserver. +This can only be pointing to our own PDU because we are only responsible for sending our own PDUs.$$; From 121fce7500329be61eac46eaa63bb3a0c1c54344 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 15 Mar 2023 08:07:20 -0400 Subject: [PATCH 316/344] Enable running tests & release artifacts on merge queue. (#15244) --- .github/workflows/release-artifacts.yml | 4 +++- .github/workflows/tests.yml | 1 + changelog.d/15244.misc | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15244.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index bf57bcab6104..ebd7d298a9e9 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -4,13 +4,15 @@ name: Build release artifacts on: # we build on PRs and develop to (hopefully) get early warning - # of things breaking (but only build one set of debs) + # of things breaking (but only build one set of debs). PRs skip + # building wheels on macOS & ARM. pull_request: push: branches: ["develop", "release-*"] # we do the full build on tags. tags: ["v*"] + merge_group: workflow_dispatch: concurrency: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 806bd2bfa440..d2b1d75536bf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -4,6 +4,7 @@ on: push: branches: ["develop", "release-*"] pull_request: + merge_group: workflow_dispatch: concurrency: diff --git a/changelog.d/15244.misc b/changelog.d/15244.misc new file mode 100644 index 000000000000..dacdcf4d5d6f --- /dev/null +++ b/changelog.d/15244.misc @@ -0,0 +1 @@ +Configure GitHub Actions for merge queues. From 3bf973edc7ecf911daab6d8c58d1264891f3ed39 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Wed, 15 Mar 2023 15:42:20 -0400 Subject: [PATCH 317/344] Remove unused class: DirectTcpReplicationClientFactory. (#15272) --- changelog.d/15272.misc | 1 + synapse/replication/tcp/client.py | 51 ------------------------------- 2 files changed, 1 insertion(+), 51 deletions(-) create mode 100644 changelog.d/15272.misc diff --git a/changelog.d/15272.misc b/changelog.d/15272.misc new file mode 100644 index 000000000000..7a3ef323e9aa --- /dev/null +++ b/changelog.d/15272.misc @@ -0,0 +1 @@ +Remove unused class `DirectTcpReplicationClientFactory`. 
diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 424854efbe52..200f667fdf27 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -18,16 +18,12 @@ from twisted.internet import defer from twisted.internet.defer import Deferred -from twisted.internet.interfaces import IAddress, IConnector -from twisted.internet.protocol import ReconnectingClientFactory -from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership, ReceiptTypes from synapse.federation import send_queue from synapse.federation.sender import FederationSender from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol from synapse.replication.tcp.streams import ( AccountDataStream, DeviceListsStream, @@ -53,7 +49,6 @@ from synapse.util.metrics import Measure if TYPE_CHECKING: - from synapse.replication.tcp.handler import ReplicationCommandHandler from synapse.server import HomeServer logger = logging.getLogger(__name__) @@ -62,52 +57,6 @@ _WAIT_FOR_REPLICATION_TIMEOUT_SECONDS = 5 -class DirectTcpReplicationClientFactory(ReconnectingClientFactory): - """Factory for building connections to the master. Will reconnect if the - connection is lost. - - Accepts a handler that is passed to `ClientReplicationStreamProtocol`. - """ - - initialDelay = 0.1 - maxDelay = 1 # Try at least once every N seconds - - def __init__( - self, - hs: "HomeServer", - client_name: str, - command_handler: "ReplicationCommandHandler", - ): - self.client_name = client_name - self.command_handler = command_handler - self.server_name = hs.config.server.server_name - self.hs = hs - self._clock = hs.get_clock() # As self.clock is defined in super class - - hs.get_reactor().addSystemEventTrigger("before", "shutdown", self.stopTrying) - - def startedConnecting(self, connector: IConnector) -> None: - logger.info("Connecting to replication: %r", connector.getDestination()) - - def buildProtocol(self, addr: IAddress) -> ClientReplicationStreamProtocol: - logger.info("Connected to replication: %r", addr) - return ClientReplicationStreamProtocol( - self.hs, - self.client_name, - self.server_name, - self._clock, - self.command_handler, - ) - - def clientConnectionLost(self, connector: IConnector, reason: Failure) -> None: - logger.error("Lost replication conn: %r", reason) - ReconnectingClientFactory.clientConnectionLost(self, connector, reason) - - def clientConnectionFailed(self, connector: IConnector, reason: Failure) -> None: - logger.error("Failed to connect to replication: %r", reason) - ReconnectingClientFactory.clientConnectionFailed(self, connector, reason) - - class ReplicationDataHandler: """Handles incoming stream updates from replication. From f54f877f273b7115777b93524983ea7455be5919 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 16 Mar 2023 09:55:19 +0000 Subject: [PATCH 318/344] Preparatory work to fix the user directory assuming that any remote membership state events represent a profile change. 
[rei:userdirpriv] (#14755) * Remove special-case method for new memberships only, use more generic method * Only collect profiles from state events in public rooms * Add a table to track stale remote user profiles * Add store methods to set and delete rows in this new table * Mark remote profiles as stale when a member state event comes in to a private room * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Simplify by removing Optionality of `event_id` * Replace names and avatars with None if they're set to dodgy things I think this makes more sense anyway. * Move schema delta to 74 (I missed the boat?) * Turns out these can be None after all --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/14755.bugfix | 1 + synapse/handlers/user_directory.py | 81 +++++++++++-------- .../storage/databases/main/user_directory.py | 40 +++++++++ .../01_user_directory_stale_remote_users.sql | 39 +++++++++ 4 files changed, 127 insertions(+), 34 deletions(-) create mode 100644 changelog.d/14755.bugfix create mode 100644 synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql diff --git a/changelog.d/14755.bugfix b/changelog.d/14755.bugfix new file mode 100644 index 000000000000..12f979e9d041 --- /dev/null +++ b/changelog.d/14755.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. \ No newline at end of file diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 3610b6bf785e..0815be79fa54 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -28,6 +28,11 @@ logger = logging.getLogger(__name__) +# Don't refresh a stale user directory entry, using a Federation /profile request, +# for 60 seconds. This gives time for other state events to arrive (which will +# then be coalesced such that only one /profile request is made). +USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000 + class UserDirectoryHandler(StateDeltasHandler): """Handles queries and updates for the user_directory. @@ -200,8 +205,8 @@ async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: typ = delta["type"] state_key = delta["state_key"] room_id = delta["room_id"] - event_id = delta["event_id"] - prev_event_id = delta["prev_event_id"] + event_id: Optional[str] = delta["event_id"] + prev_event_id: Optional[str] = delta["prev_event_id"] logger.debug("Handling: %r %r, %s", typ, state_key, event_id) @@ -297,8 +302,8 @@ async def _handle_room_publicity_change( async def _handle_room_membership_event( self, room_id: str, - prev_event_id: str, - event_id: str, + prev_event_id: Optional[str], + event_id: Optional[str], state_key: str, ) -> None: """Process a single room membershp event. @@ -348,7 +353,8 @@ async def _handle_room_membership_event( # Handle any profile changes for remote users. # (For local users the rest of the application calls # `handle_local_profile_change`.) - if is_remote: + # Only process if there is an event_id. + if is_remote and event_id is not None: await self._handle_possible_remote_profile_change( state_key, room_id, prev_event_id, event_id ) @@ -356,29 +362,13 @@ async def _handle_room_membership_event( # This may be the first time we've seen a remote user. If # so, ensure we have a directory entry for them. (For local users, # the rest of the application calls `handle_local_profile_change`.) 
- if is_remote: - await self._upsert_directory_entry_for_remote_user(state_key, event_id) + # Only process if there is an event_id. + if is_remote and event_id is not None: + await self._handle_possible_remote_profile_change( + state_key, room_id, None, event_id + ) await self._track_user_joined_room(room_id, state_key) - async def _upsert_directory_entry_for_remote_user( - self, user_id: str, event_id: str - ) -> None: - """A remote user has just joined a room. Ensure they have an entry in - the user directory. The caller is responsible for making sure they're - remote. - """ - event = await self.store.get_event(event_id, allow_none=True) - # It isn't expected for this event to not exist, but we - # don't want the entire background process to break. - if event is None: - return - - logger.debug("Adding new user to dir, %r", user_id) - - await self.store.update_profile_in_user_dir( - user_id, event.content.get("displayname"), event.content.get("avatar_url") - ) - async def _track_user_joined_room(self, room_id: str, joining_user_id: str) -> None: """Someone's just joined a room. Update `users_in_public_rooms` or `users_who_share_private_rooms` as appropriate. @@ -460,14 +450,17 @@ async def _handle_possible_remote_profile_change( user_id: str, room_id: str, prev_event_id: Optional[str], - event_id: Optional[str], + event_id: str, ) -> None: """Check member event changes for any profile changes and update the database if there are. This is intended for remote users only. The caller is responsible for checking that the given user is remote. """ - if not prev_event_id or not event_id: - return + + if not prev_event_id: + # If we don't have an older event to fall back on, just fetch the same + # event itself. + prev_event_id = event_id prev_event = await self.store.get_event(prev_event_id, allow_none=True) event = await self.store.get_event(event_id, allow_none=True) @@ -478,17 +471,37 @@ async def _handle_possible_remote_profile_change( if event.membership != Membership.JOIN: return + is_public = await self.store.is_room_world_readable_or_publicly_joinable( + room_id + ) + if not is_public: + # Don't collect user profiles from private rooms as they are not guaranteed + # to be the same as the user's global profile. + now_ts = self.clock.time_msec() + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS, + retry_counter=0, + ) + return + prev_name = prev_event.content.get("displayname") new_name = event.content.get("displayname") - # If the new name is an unexpected form, do not update the directory. + # If the new name is an unexpected form, replace with None. if not isinstance(new_name, str): - new_name = prev_name + new_name = None prev_avatar = prev_event.content.get("avatar_url") new_avatar = event.content.get("avatar_url") - # If the new avatar is an unexpected form, do not update the directory. + # If the new avatar is an unexpected form, replace with None. if not isinstance(new_avatar, str): - new_avatar = prev_avatar + new_avatar = None - if prev_name != new_name or prev_avatar != new_avatar: + if ( + prev_name != new_name + or prev_avatar != new_avatar + or prev_event_id == event_id + ): + # Only update if something has changed, or we didn't have a previous event + # in the first place. 
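+            # (Editorial note, not in the original patch: when prev_event_id equals
+            # event_id the two profiles compare equal by construction, so this extra
+            # clause is what guarantees a newly-seen remote joiner still gets written
+            # to the directory.)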
await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index f16a509ac496..9cf01b7f36ba 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -54,6 +54,7 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import ( JsonDict, + UserID, UserProfile, get_domain_from_id, get_localpart_from_id, @@ -473,11 +474,42 @@ async def is_room_world_readable_or_publicly_joinable(self, room_id: str) -> boo return False + async def set_remote_user_profile_in_user_dir_stale( + self, user_id: str, next_try_at_ms: int, retry_counter: int + ) -> None: + """ + Marks a remote user as having a possibly-stale user directory profile. + + Args: + user_id: the remote user who may have a stale profile on this server. + next_try_at_ms: timestamp in ms after which the user directory profile can be + refreshed. + retry_counter: number of failures in refreshing the profile so far. Used for + exponential backoff calculations. + """ + assert not self.hs.is_mine_id( + user_id + ), "Can't mark a local user as a stale remote user." + + server_name = UserID.from_string(user_id).domain + + await self.db_pool.simple_upsert( + table="user_directory_stale_remote_users", + keyvalues={"user_id": user_id}, + values={ + "next_try_at_ts": next_try_at_ms, + "retry_counter": retry_counter, + "user_server_name": server_name, + }, + desc="set_remote_user_profile_in_user_dir_stale", + ) + async def update_profile_in_user_dir( self, user_id: str, display_name: Optional[str], avatar_url: Optional[str] ) -> None: """ Update or add a user's profile in the user directory. + If the user is remote, the profile will be marked as not stale. """ # If the display name or avatar URL are unexpected types, replace with None. display_name = non_null_str_or_none(display_name) @@ -491,6 +523,14 @@ def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: values={"display_name": display_name, "avatar_url": avatar_url}, ) + if not self.hs.is_mine_id(user_id): + # Remote users: Make sure the profile is not marked as stale anymore. + self.db_pool.simple_delete_txn( + txn, + table="user_directory_stale_remote_users", + keyvalues={"user_id": user_id}, + ) + # The display name that goes into the database index. index_display_name = display_name if index_display_name is not None: diff --git a/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql b/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql new file mode 100644 index 000000000000..dcb38f3d7b68 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/01_user_directory_stale_remote_users.sql @@ -0,0 +1,39 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- Table containing a list of remote users whose profiles may have changed +-- since their last update in the user directory. +CREATE TABLE user_directory_stale_remote_users ( + -- The User ID of the remote user whose profile may be stale. + user_id TEXT NOT NULL PRIMARY KEY, + + -- The server name of the user. + user_server_name TEXT NOT NULL, + + -- The timestamp (in ms) after which we should next try to request the user's + -- latest profile. + next_try_at_ts BIGINT NOT NULL, + + -- The number of retries so far. + -- 0 means we have not yet attempted to refresh the profile. + -- Used for calculating exponential backoff. + retry_counter INTEGER NOT NULL +); + +-- Create an index so we can easily query upcoming servers to try. +CREATE INDEX user_directory_stale_remote_users_next_try_idx ON user_directory_stale_remote_users(next_try_at_ts, user_server_name); + +-- Create an index so we can easily query upcoming users to try for a particular server. +CREATE INDEX user_directory_stale_remote_users_next_try_by_server_idx ON user_directory_stale_remote_users(user_server_name, next_try_at_ts); From 4953cd71dfbb1925dc4a477efd2ed48c2dfd70d6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 16 Mar 2023 10:35:31 +0000 Subject: [PATCH 319/344] Move Account Validity callbacks to a dedicated file (#15237) --- changelog.d/15237.misc | 1 + synapse/handlers/account_validity.py | 99 +++---------------- synapse/module_api/__init__.py | 18 ++-- synapse/module_api/callbacks/__init__.py | 22 +++++ .../callbacks/account_validity_callbacks.py | 93 +++++++++++++++++ synapse/rest/admin/users.py | 17 ++-- synapse/server.py | 5 + tests/rest/client/test_account.py | 5 +- 8 files changed, 154 insertions(+), 106 deletions(-) create mode 100644 changelog.d/15237.misc create mode 100644 synapse/module_api/callbacks/__init__.py create mode 100644 synapse/module_api/callbacks/account_validity_callbacks.py diff --git a/changelog.d/15237.misc b/changelog.d/15237.misc new file mode 100644 index 000000000000..9981606c3226 --- /dev/null +++ b/changelog.d/15237.misc @@ -0,0 +1 @@ +Move various module API callback registration methods to a dedicated class. \ No newline at end of file diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index 33e45e3a1136..4aa4ebf7e4a8 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -15,9 +15,7 @@ import email.mime.multipart import email.utils import logging -from typing import TYPE_CHECKING, Awaitable, Callable, List, Optional, Tuple - -from twisted.web.http import Request +from typing import TYPE_CHECKING, List, Optional, Tuple from synapse.api.errors import AuthError, StoreError, SynapseError from synapse.metrics.background_process_metrics import wrap_as_background_process @@ -30,25 +28,17 @@ logger = logging.getLogger(__name__) -# Types for callbacks to be registered via the module api -IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]] -ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable] -# Temporary hooks to allow for a transition from `/_matrix/client` endpoints -# to `/_synapse/client/account_validity`. See `register_account_validity_callbacks`. 
-ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable] -ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]] -ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable] - class AccountValidityHandler: def __init__(self, hs: "HomeServer"): self.hs = hs self.config = hs.config - self.store = self.hs.get_datastores().main - self.send_email_handler = self.hs.get_send_email_handler() - self.clock = self.hs.get_clock() + self.store = hs.get_datastores().main + self.send_email_handler = hs.get_send_email_handler() + self.clock = hs.get_clock() - self._app_name = self.hs.config.email.email_app_name + self._app_name = hs.config.email.email_app_name + self._module_api_callbacks = hs.get_module_api_callbacks().account_validity self._account_validity_enabled = ( hs.config.account_validity.account_validity_enabled @@ -78,69 +68,6 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.run_background_tasks: self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) - self._is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = [] - self._on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = [] - self._on_legacy_send_mail_callback: Optional[ - ON_LEGACY_SEND_MAIL_CALLBACK - ] = None - self._on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None - - # The legacy admin requests callback isn't a protected attribute because we need - # to access it from the admin servlet, which is outside of this handler. - self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None - - def register_account_validity_callbacks( - self, - is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None, - on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None, - on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None, - on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None, - on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None, - ) -> None: - """Register callbacks from module for each hook.""" - if is_user_expired is not None: - self._is_user_expired_callbacks.append(is_user_expired) - - if on_user_registration is not None: - self._on_user_registration_callbacks.append(on_user_registration) - - # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and - # an admin one). As part of moving the feature into a module, we need to change - # the path from /_matrix/client/unstable/account_validity/... to - # /_synapse/client/account_validity, because: - # - # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix - # * the way we register servlets means that modules can't register resources - # under /_matrix/client - # - # We need to allow for a transition period between the old and new endpoints - # in order to allow for clients to update (and for emails to be processed). - # - # Once the email-account-validity module is loaded, it will take control of account - # validity by moving the rows from our `account_validity` table into its own table. - # - # Therefore, we need to allow modules (in practice just the one implementing the - # email-based account validity) to temporarily hook into the legacy endpoints so we - # can route the traffic coming into the old endpoints into the module, which is - # why we have the following three temporary hooks. 
- if on_legacy_send_mail is not None: - if self._on_legacy_send_mail_callback is not None: - raise RuntimeError("Tried to register on_legacy_send_mail twice") - - self._on_legacy_send_mail_callback = on_legacy_send_mail - - if on_legacy_renew is not None: - if self._on_legacy_renew_callback is not None: - raise RuntimeError("Tried to register on_legacy_renew twice") - - self._on_legacy_renew_callback = on_legacy_renew - - if on_legacy_admin_request is not None: - if self.on_legacy_admin_request_callback is not None: - raise RuntimeError("Tried to register on_legacy_admin_request twice") - - self.on_legacy_admin_request_callback = on_legacy_admin_request - async def is_user_expired(self, user_id: str) -> bool: """Checks if a user has expired against third-party modules. @@ -150,7 +77,7 @@ async def is_user_expired(self, user_id: str) -> bool: Returns: Whether the user has expired. """ - for callback in self._is_user_expired_callbacks: + for callback in self._module_api_callbacks.is_user_expired_callbacks: expired = await delay_cancellation(callback(user_id)) if expired is not None: return expired @@ -168,7 +95,7 @@ async def on_user_registration(self, user_id: str) -> None: Args: user_id: The ID of the newly registered user. """ - for callback in self._on_user_registration_callbacks: + for callback in self._module_api_callbacks.on_user_registration_callbacks: await callback(user_id) @wrap_as_background_process("send_renewals") @@ -198,8 +125,8 @@ async def send_renewal_email_to_user(self, user_id: str) -> None: """ # If a module supports sending a renewal email from here, do that, otherwise do # the legacy dance. - if self._on_legacy_send_mail_callback is not None: - await self._on_legacy_send_mail_callback(user_id) + if self._module_api_callbacks.on_legacy_send_mail_callback is not None: + await self._module_api_callbacks.on_legacy_send_mail_callback(user_id) return if not self._account_validity_renew_by_email_enabled: @@ -336,8 +263,10 @@ async def renew_account(self, renewal_token: str) -> Tuple[bool, bool, int]: """ # If a module supports triggering a renew from here, do that, otherwise do the # legacy dance. 
- if self._on_legacy_renew_callback is not None: - return await self._on_legacy_renew_callback(renewal_token) + if self._module_api_callbacks.on_legacy_renew_callback is not None: + return await self._module_api_callbacks.on_legacy_renew_callback( + renewal_token + ) try: ( diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 424239e3df86..595c23e78d7e 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -73,13 +73,6 @@ ON_USER_DEACTIVATION_STATUS_CHANGED_CALLBACK, ) from synapse.handlers.account_data import ON_ACCOUNT_DATA_UPDATED_CALLBACK -from synapse.handlers.account_validity import ( - IS_USER_EXPIRED_CALLBACK, - ON_LEGACY_ADMIN_REQUEST, - ON_LEGACY_RENEW_CALLBACK, - ON_LEGACY_SEND_MAIL_CALLBACK, - ON_USER_REGISTRATION_CALLBACK, -) from synapse.handlers.auth import ( CHECK_3PID_AUTH_CALLBACK, CHECK_AUTH_CALLBACK, @@ -105,6 +98,13 @@ run_in_background, ) from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.module_api.callbacks.account_validity_callbacks import ( + IS_USER_EXPIRED_CALLBACK, + ON_LEGACY_ADMIN_REQUEST, + ON_LEGACY_RENEW_CALLBACK, + ON_LEGACY_SEND_MAIL_CALLBACK, + ON_USER_REGISTRATION_CALLBACK, +) from synapse.rest.client.login import LoginResponse from synapse.storage import DataStore from synapse.storage.background_updates import ( @@ -250,6 +250,7 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: self._push_rules_handler = hs.get_push_rules_handler() self._device_handler = hs.get_device_handler() self.custom_template_dir = hs.config.server.custom_template_directory + self._callbacks = hs.get_module_api_callbacks() try: app_name = self._hs.config.email.email_app_name @@ -271,7 +272,6 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: self._account_data_manager = AccountDataManager(hs) self._spam_checker = hs.get_spam_checker() - self._account_validity_handler = hs.get_account_validity_handler() self._third_party_event_rules = hs.get_third_party_event_rules() self._password_auth_provider = hs.get_password_auth_provider() self._presence_router = hs.get_presence_router() @@ -332,7 +332,7 @@ def register_account_validity_callbacks( Added in Synapse v1.39.0. """ - return self._account_validity_handler.register_account_validity_callbacks( + return self._callbacks.account_validity.register_callbacks( is_user_expired=is_user_expired, on_user_registration=on_user_registration, on_legacy_send_mail=on_legacy_send_mail, diff --git a/synapse/module_api/callbacks/__init__.py b/synapse/module_api/callbacks/__init__.py new file mode 100644 index 000000000000..3d977bf6558a --- /dev/null +++ b/synapse/module_api/callbacks/__init__.py @@ -0,0 +1,22 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from synapse.module_api.callbacks.account_validity_callbacks import ( + AccountValidityModuleApiCallbacks, +) + + +class ModuleApiCallbacks: + def __init__(self) -> None: + self.account_validity = AccountValidityModuleApiCallbacks() diff --git a/synapse/module_api/callbacks/account_validity_callbacks.py b/synapse/module_api/callbacks/account_validity_callbacks.py new file mode 100644 index 000000000000..531d0c9ddcf4 --- /dev/null +++ b/synapse/module_api/callbacks/account_validity_callbacks.py @@ -0,0 +1,93 @@ +# Copyright 2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import Awaitable, Callable, List, Optional, Tuple + +from twisted.web.http import Request + +logger = logging.getLogger(__name__) + +# Types for callbacks to be registered via the module api +IS_USER_EXPIRED_CALLBACK = Callable[[str], Awaitable[Optional[bool]]] +ON_USER_REGISTRATION_CALLBACK = Callable[[str], Awaitable] +# Temporary hooks to allow for a transition from `/_matrix/client` endpoints +# to `/_synapse/client/account_validity`. See `register_callbacks` below. +ON_LEGACY_SEND_MAIL_CALLBACK = Callable[[str], Awaitable] +ON_LEGACY_RENEW_CALLBACK = Callable[[str], Awaitable[Tuple[bool, bool, int]]] +ON_LEGACY_ADMIN_REQUEST = Callable[[Request], Awaitable] + + +class AccountValidityModuleApiCallbacks: + def __init__(self) -> None: + self.is_user_expired_callbacks: List[IS_USER_EXPIRED_CALLBACK] = [] + self.on_user_registration_callbacks: List[ON_USER_REGISTRATION_CALLBACK] = [] + self.on_legacy_send_mail_callback: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None + self.on_legacy_renew_callback: Optional[ON_LEGACY_RENEW_CALLBACK] = None + + # The legacy admin requests callback isn't a protected attribute because we need + # to access it from the admin servlet, which is outside of this handler. + self.on_legacy_admin_request_callback: Optional[ON_LEGACY_ADMIN_REQUEST] = None + + def register_callbacks( + self, + is_user_expired: Optional[IS_USER_EXPIRED_CALLBACK] = None, + on_user_registration: Optional[ON_USER_REGISTRATION_CALLBACK] = None, + on_legacy_send_mail: Optional[ON_LEGACY_SEND_MAIL_CALLBACK] = None, + on_legacy_renew: Optional[ON_LEGACY_RENEW_CALLBACK] = None, + on_legacy_admin_request: Optional[ON_LEGACY_ADMIN_REQUEST] = None, + ) -> None: + """Register callbacks from module for each hook.""" + if is_user_expired is not None: + self.is_user_expired_callbacks.append(is_user_expired) + + if on_user_registration is not None: + self.on_user_registration_callbacks.append(on_user_registration) + + # The builtin account validity feature exposes 3 endpoints (send_mail, renew, and + # an admin one). As part of moving the feature into a module, we need to change + # the path from /_matrix/client/unstable/account_validity/... 
to + # /_synapse/client/account_validity, because: + # + # * the feature isn't part of the Matrix spec thus shouldn't live under /_matrix + # * the way we register servlets means that modules can't register resources + # under /_matrix/client + # + # We need to allow for a transition period between the old and new endpoints + # in order to allow for clients to update (and for emails to be processed). + # + # Once the email-account-validity module is loaded, it will take control of account + # validity by moving the rows from our `account_validity` table into its own table. + # + # Therefore, we need to allow modules (in practice just the one implementing the + # email-based account validity) to temporarily hook into the legacy endpoints so we + # can route the traffic coming into the old endpoints into the module, which is + # why we have the following three temporary hooks. + if on_legacy_send_mail is not None: + if self.on_legacy_send_mail_callback is not None: + raise RuntimeError("Tried to register on_legacy_send_mail twice") + + self.on_legacy_send_mail_callback = on_legacy_send_mail + + if on_legacy_renew is not None: + if self.on_legacy_renew_callback is not None: + raise RuntimeError("Tried to register on_legacy_renew twice") + + self.on_legacy_renew_callback = on_legacy_renew + + if on_legacy_admin_request is not None: + if self.on_legacy_admin_request_callback is not None: + raise RuntimeError("Tried to register on_legacy_admin_request twice") + + self.on_legacy_admin_request_callback = on_legacy_admin_request diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 357e9a574da5..281e8fd0adc2 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -683,19 +683,18 @@ class AccountValidityRenewServlet(RestServlet): PATTERNS = admin_patterns("/account_validity/validity$") def __init__(self, hs: "HomeServer"): - self.account_activity_handler = hs.get_account_validity_handler() + self.account_validity_handler = hs.get_account_validity_handler() + self.account_validity_module_callbacks = ( + hs.get_module_api_callbacks().account_validity + ) self.auth = hs.get_auth() async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: await assert_requester_is_admin(self.auth, request) - if self.account_activity_handler.on_legacy_admin_request_callback: - expiration_ts = ( - await ( - self.account_activity_handler.on_legacy_admin_request_callback( - request - ) - ) + if self.account_validity_module_callbacks.on_legacy_admin_request_callback: + expiration_ts = await self.account_validity_module_callbacks.on_legacy_admin_request_callback( + request ) else: body = parse_json_object_from_request(request) @@ -706,7 +705,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "Missing property 'user_id' in the request body", ) - expiration_ts = await self.account_activity_handler.renew_account_for_user( + expiration_ts = await self.account_validity_handler.renew_account_for_user( body["user_id"], body.get("expiration_ts"), not body.get("enable_renewal_emails", True), diff --git a/synapse/server.py b/synapse/server.py index 8078463530aa..a191c1999347 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -110,6 +110,7 @@ from synapse.media.media_repository import MediaRepository from synapse.metrics.common_usage_metrics import CommonUsageMetricsManager from synapse.module_api import ModuleApi +from synapse.module_api.callbacks import ModuleApiCallbacks from synapse.notifier import Notifier, ReplicationNotifier from 
synapse.push.bulk_push_rule_evaluator import BulkPushRuleEvaluator from synapse.push.pusherpool import PusherPool @@ -800,6 +801,10 @@ def get_federation_ratelimiter(self) -> FederationRateLimiter: def get_module_api(self) -> ModuleApi: return ModuleApi(self, self.get_auth_handler()) + @cache_in_self + def get_module_api_callbacks(self) -> ModuleApiCallbacks: + return ModuleApiCallbacks() + @cache_in_self def get_account_data_handler(self) -> AccountDataHandler: return AccountDataHandler(self) diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 2b05dffc7d22..7f675c44a278 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -1249,9 +1249,8 @@ async def is_expired(user_id: str) -> bool: # account status will fail. return UserID.from_string(user_id).localpart == "someuser" - self.hs.get_account_validity_handler()._is_user_expired_callbacks.append( - is_expired - ) + account_validity_callbacks = self.hs.get_module_api_callbacks().account_validity + account_validity_callbacks.is_user_expired_callbacks.append(is_expired) self._test_status( users=[user], From 1f5473465d4cb08239bcc97dbbbf185af6841863 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Thu, 16 Mar 2023 11:44:11 +0000 Subject: [PATCH 320/344] Refresh remote profiles that have been marked as stale, in order to fill the user directory. [rei:userdirpriv] (#14756) * Scaffolding for background process to refresh profiles * Add scaffolding for background process to refresh profiles for a given server * Implement the code to select servers to refresh from * Ensure we don't build up multiple looping calls * Make `get_profile` able to respect backoffs * Add logic for refreshing users * When backing off, schedule a refresh when the backoff is over * Wake up the background processes when we receive an interesting state event * Add tests * Newsfile Signed-off-by: Olivier Wilkinson (reivilibre) * Add comment about 1<<62 --------- Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/14756.bugfix | 1 + synapse/handlers/profile.py | 4 +- synapse/handlers/user_directory.py | 242 ++++++++++++++++++ .../storage/databases/main/user_directory.py | 74 ++++++ tests/handlers/test_user_directory.py | 187 +++++++++++++- 5 files changed, 504 insertions(+), 4 deletions(-) create mode 100644 changelog.d/14756.bugfix diff --git a/changelog.d/14756.bugfix b/changelog.d/14756.bugfix new file mode 100644 index 000000000000..12f979e9d041 --- /dev/null +++ b/changelog.d/14756.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. 
\ No newline at end of file diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 4bf9a047a3bc..9a81a77cbd3b 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -63,7 +63,7 @@ def __init__(self, hs: "HomeServer"): self._third_party_rules = hs.get_third_party_event_rules() - async def get_profile(self, user_id: str) -> JsonDict: + async def get_profile(self, user_id: str, ignore_backoff: bool = True) -> JsonDict: target_user = UserID.from_string(user_id) if self.hs.is_mine(target_user): @@ -81,7 +81,7 @@ async def get_profile(self, user_id: str) -> JsonDict: destination=target_user.domain, query_type="profile", args={"user_id": user_id}, - ignore_backoff=True, + ignore_backoff=ignore_backoff, ) return result except RequestSendFailed as e: diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 0815be79fa54..28a92d41d6fc 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -13,15 +13,22 @@ # limitations under the License. import logging +from http import HTTPStatus from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple +from twisted.internet.interfaces import IDelayedCall + import synapse.metrics from synapse.api.constants import EventTypes, HistoryVisibility, JoinRules, Membership +from synapse.api.errors import Codes, SynapseError from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo +from synapse.types import UserID from synapse.util.metrics import Measure +from synapse.util.retryutils import NotRetryingDestination +from synapse.util.stringutils import non_null_str_or_none if TYPE_CHECKING: from synapse.server import HomeServer @@ -33,6 +40,25 @@ # then be coalesced such that only one /profile request is made). USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000 +# Maximum number of remote servers that we will attempt to refresh profiles for +# in one go. +MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO = 5 + +# As long as we have servers to refresh (without backoff), keep adding more +# every 15 seconds. +INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15 + + +def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int: + """ + Calculates the time of a next retry given `now_ts` in ms and the number + of failures encountered thus far. + + Currently the sequence goes: + 1 min, 5 min, 25 min, 2 hour, 10 hour, 52 hour, 10 day, 7.75 week + """ + return now_ts + 60_000 * (5 ** min(retry_count, 7)) + class UserDirectoryHandler(StateDeltasHandler): """Handles queries and updates for the user_directory. 
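# Editorial aside, not part of the patch: a self-contained check of the retry schedule
# produced by calculate_time_of_next_retry() above. Only the 60_000 ms base and the
# capped 5 ** min(retry_count, 7) factor are taken from that function; the loop is
# purely illustrative.

def _illustrate_retry_schedule() -> None:
    for retry_count in range(8):
        delay_minutes = (60_000 * (5 ** min(retry_count, 7))) // 60_000
        # Prints 1, 5, 25, 125, 625, 3125, 15625, 78125 (minutes), i.e. roughly
        # 1 min, 5 min, 25 min, 2 h, 10 h, 52 h, 10.8 days and 7.75 weeks,
        # matching the sequence listed in the docstring.
        print(retry_count, delay_minutes)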
@@ -69,12 +95,24 @@ def __init__(self, hs: "HomeServer"): self.update_user_directory = hs.config.worker.should_update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users self.spam_checker = hs.get_spam_checker() + self._hs = hs + # The current position in the current_state_delta stream self.pos: Optional[int] = None # Guard to ensure we only process deltas one at a time self._is_processing = False + # Guard to ensure we only have one process for refreshing remote profiles + self._is_refreshing_remote_profiles = False + # Handle to cancel the `call_later` of `kick_off_remote_profile_refresh_process` + self._refresh_remote_profiles_call_later: Optional[IDelayedCall] = None + + # Guard to ensure we only have one process for refreshing remote profiles + # for the given servers. + # Set of server names. + self._is_refreshing_remote_profiles_for_servers: Set[str] = set() + if self.update_user_directory: self.notifier.add_replication_callback(self.notify_new_event) @@ -82,6 +120,11 @@ def __init__(self, hs: "HomeServer"): # we start populating the user directory self.clock.call_later(0, self.notify_new_event) + # Kick off the profile refresh process on startup + self._refresh_remote_profiles_call_later = self.clock.call_later( + 10, self.kick_off_remote_profile_refresh_process + ) + async def search_users( self, user_id: str, search_term: str, limit: int ) -> SearchResult: @@ -483,6 +526,20 @@ async def _handle_possible_remote_profile_change( next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS, retry_counter=0, ) + # Schedule a wake-up to refresh the user directory for this server. + # We intentionally wake up this server directly because we don't want + # other servers ahead of it in the queue to get in the way of updating + # the profile if the server only just sent us an event. + self.clock.call_later( + USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + self.kick_off_remote_profile_refresh_process_for_remote_server, + UserID.from_string(user_id).domain, + ) + # Schedule a wake-up to handle any backoffs that may occur in the future. + self.clock.call_later( + 2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + self.kick_off_remote_profile_refresh_process, + ) return prev_name = prev_event.content.get("displayname") @@ -505,3 +562,188 @@ async def _handle_possible_remote_profile_change( # Only update if something has changed, or we didn't have a previous event # in the first place. await self.store.update_profile_in_user_dir(user_id, new_name, new_avatar) + + def kick_off_remote_profile_refresh_process(self) -> None: + """Called when there may be remote users with stale profiles to be refreshed""" + if not self.update_user_directory: + return + + if self._is_refreshing_remote_profiles: + return + + if self._refresh_remote_profiles_call_later: + if self._refresh_remote_profiles_call_later.active(): + self._refresh_remote_profiles_call_later.cancel() + self._refresh_remote_profiles_call_later = None + + async def process() -> None: + try: + await self._unsafe_refresh_remote_profiles() + finally: + self._is_refreshing_remote_profiles = False + + self._is_refreshing_remote_profiles = True + run_as_background_process("user_directory.refresh_remote_profiles", process) + + async def _unsafe_refresh_remote_profiles(self) -> None: + limit = MAX_SERVERS_TO_REFRESH_PROFILES_FOR_IN_ONE_GO - len( + self._is_refreshing_remote_profiles_for_servers + ) + if limit <= 0: + # nothing to do: already refreshing the maximum number of servers + # at once. 
+ # Come back later. + self._refresh_remote_profiles_call_later = self.clock.call_later( + INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES, + self.kick_off_remote_profile_refresh_process, + ) + return + + servers_to_refresh = ( + await self.store.get_remote_servers_with_profiles_to_refresh( + now_ts=self.clock.time_msec(), limit=limit + ) + ) + + if not servers_to_refresh: + # Do we have any backing-off servers that we should try again + # for eventually? + # By setting `now` is a point in the far future, we can ask for + # which server/user is next to be refreshed, even though it is + # not actually refreshable *now*. + end_of_time = 1 << 62 + backing_off_servers = ( + await self.store.get_remote_servers_with_profiles_to_refresh( + now_ts=end_of_time, limit=1 + ) + ) + if backing_off_servers: + # Find out when the next user is refreshable and schedule a + # refresh then. + backing_off_server_name = backing_off_servers[0] + users = await self.store.get_remote_users_to_refresh_on_server( + backing_off_server_name, now_ts=end_of_time, limit=1 + ) + if not users: + return + _, _, next_try_at_ts = users[0] + self._refresh_remote_profiles_call_later = self.clock.call_later( + ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2, + self.kick_off_remote_profile_refresh_process, + ) + + return + + for server_to_refresh in servers_to_refresh: + self.kick_off_remote_profile_refresh_process_for_remote_server( + server_to_refresh + ) + + self._refresh_remote_profiles_call_later = self.clock.call_later( + INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES, + self.kick_off_remote_profile_refresh_process, + ) + + def kick_off_remote_profile_refresh_process_for_remote_server( + self, server_name: str + ) -> None: + """Called when there may be remote users with stale profiles to be refreshed + on the given server.""" + if not self.update_user_directory: + return + + if server_name in self._is_refreshing_remote_profiles_for_servers: + return + + async def process() -> None: + try: + await self._unsafe_refresh_remote_profiles_for_remote_server( + server_name + ) + finally: + self._is_refreshing_remote_profiles_for_servers.remove(server_name) + + self._is_refreshing_remote_profiles_for_servers.add(server_name) + run_as_background_process( + "user_directory.refresh_remote_profiles_for_remote_server", process + ) + + async def _unsafe_refresh_remote_profiles_for_remote_server( + self, server_name: str + ) -> None: + logger.info("Refreshing profiles in user directory for %s", server_name) + + while True: + # Get a handful of users to process. + next_batch = await self.store.get_remote_users_to_refresh_on_server( + server_name, now_ts=self.clock.time_msec(), limit=10 + ) + if not next_batch: + # Finished for now + return + + for user_id, retry_counter, _ in next_batch: + # Request the profile of the user. + try: + profile = await self._hs.get_profile_handler().get_profile( + user_id, ignore_backoff=False + ) + except NotRetryingDestination as e: + logger.info( + "Failed to refresh profile for %r because the destination is undergoing backoff", + user_id, + ) + # As a special-case, we back off until the destination is no longer + # backed off from. + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + e.retry_last_ts + e.retry_interval, + retry_counter=retry_counter + 1, + ) + continue + except SynapseError as e: + if e.code == HTTPStatus.NOT_FOUND and e.errcode == Codes.NOT_FOUND: + # The profile doesn't exist. + # TODO Does this mean we should clear it from our user + # directory? 
+ await self.store.clear_remote_user_profile_in_user_dir_stale( + user_id + ) + logger.warning( + "Refresh of remote profile %r: not found (%r)", + user_id, + e.msg, + ) + continue + + logger.warning( + "Failed to refresh profile for %r because %r", user_id, e + ) + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + calculate_time_of_next_retry( + self.clock.time_msec(), retry_counter + 1 + ), + retry_counter=retry_counter + 1, + ) + continue + except Exception: + logger.error( + "Failed to refresh profile for %r due to unhandled exception", + user_id, + exc_info=True, + ) + await self.store.set_remote_user_profile_in_user_dir_stale( + user_id, + calculate_time_of_next_retry( + self.clock.time_msec(), retry_counter + 1 + ), + retry_counter=retry_counter + 1, + ) + continue + + await self.store.update_profile_in_user_dir( + user_id, + display_name=non_null_str_or_none(profile.get("displayname")), + avatar_url=non_null_str_or_none(profile.get("avatar_url")), + ) diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 9cf01b7f36ba..97f09b73dde6 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -504,6 +504,80 @@ async def set_remote_user_profile_in_user_dir_stale( desc="set_remote_user_profile_in_user_dir_stale", ) + async def clear_remote_user_profile_in_user_dir_stale(self, user_id: str) -> None: + """ + Marks a remote user as no longer having a possibly-stale user directory profile. + + Args: + user_id: the remote user who no longer has a stale profile on this server. + """ + await self.db_pool.simple_delete( + table="user_directory_stale_remote_users", + keyvalues={"user_id": user_id}, + desc="clear_remote_user_profile_in_user_dir_stale", + ) + + async def get_remote_servers_with_profiles_to_refresh( + self, now_ts: int, limit: int + ) -> List[str]: + """ + Get a list of up to `limit` server names which have users whose + locally-cached profiles we believe to be stale + and are refreshable given the current time `now_ts` in milliseconds. + """ + + def _get_remote_servers_with_refreshable_profiles_txn( + txn: LoggingTransaction, + ) -> List[str]: + sql = """ + SELECT user_server_name + FROM user_directory_stale_remote_users + WHERE next_try_at_ts < ? + GROUP BY user_server_name + ORDER BY MIN(next_try_at_ts), user_server_name + LIMIT ? + """ + txn.execute(sql, (now_ts, limit)) + return [row[0] for row in txn] + + return await self.db_pool.runInteraction( + "get_remote_servers_with_profiles_to_refresh", + _get_remote_servers_with_refreshable_profiles_txn, + ) + + async def get_remote_users_to_refresh_on_server( + self, server_name: str, now_ts: int, limit: int + ) -> List[Tuple[str, int, int]]: + """ + Get a list of up to `limit` user IDs from the server `server_name` + whose locally-cached profiles we believe to be stale + and are refreshable given the current time `now_ts` in milliseconds. + + Returns: + tuple of: + - User ID + - Retry counter (number of failures so far) + - Time the retry is scheduled for, in milliseconds + """ + + def _get_remote_users_to_refresh_on_server_txn( + txn: LoggingTransaction, + ) -> List[Tuple[str, int, int]]: + sql = """ + SELECT user_id, retry_counter, next_try_at_ts + FROM user_directory_stale_remote_users + WHERE user_server_name = ? AND next_try_at_ts < ? + ORDER BY next_try_at_ts + LIMIT ? 
+ """ + txn.execute(sql, (server_name, now_ts, limit)) + return cast(List[Tuple[str, int, int]], txn.fetchall()) + + return await self.db_pool.runInteraction( + "get_remote_users_to_refresh_on_server", + _get_remote_users_to_refresh_on_server_txn, + ) + async def update_profile_in_user_dir( self, user_id: str, display_name: Optional[str], avatar_url: Optional[str] ) -> None: diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index a02c1c62279a..da4d24082648 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -19,17 +19,18 @@ import synapse.rest.admin from synapse.api.constants import UserTypes +from synapse.api.errors import SynapseError from synapse.api.room_versions import RoomVersion, RoomVersions from synapse.appservice import ApplicationService from synapse.rest.client import login, register, room, user_directory from synapse.server import HomeServer from synapse.storage.roommember import ProfileInfo -from synapse.types import UserProfile, create_requester +from synapse.types import JsonDict, UserProfile, create_requester from synapse.util import Clock from tests import unittest from tests.storage.test_user_directory import GetUserDirectoryTables -from tests.test_utils import make_awaitable +from tests.test_utils import event_injection, make_awaitable from tests.test_utils.event_injection import inject_member_event from tests.unittest import override_config @@ -1103,3 +1104,185 @@ def test_disabling_room_list(self) -> None: ) self.assertEqual(200, channel.code, channel.result) self.assertTrue(len(channel.json_body["results"]) == 0) + + +class UserDirectoryRemoteProfileTestCase(unittest.HomeserverTestCase): + servlets = [ + login.register_servlets, + synapse.rest.admin.register_servlets, + register.register_servlets, + room.register_servlets, + ] + + def default_config(self) -> JsonDict: + config = super().default_config() + # Re-enables updating the user directory, as that functionality is needed below. + config["update_user_directory_from_worker"] = None + return config + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store = hs.get_datastores().main + self.alice = self.register_user("alice", "alice123") + self.alice_tok = self.login("alice", "alice123") + self.user_dir_helper = GetUserDirectoryTables(self.store) + self.user_dir_handler = hs.get_user_directory_handler() + self.profile_handler = hs.get_profile_handler() + + # Cancel the startup call: in the steady-state case we can't rely on it anyway. + assert self.user_dir_handler._refresh_remote_profiles_call_later is not None + self.user_dir_handler._refresh_remote_profiles_call_later.cancel() + + def test_public_rooms_have_profiles_collected(self) -> None: + """ + In a public room, member state events are treated as reflecting the user's + real profile and they are accepted. + (The main motivation for accepting this is to prevent having to query + *every* single profile change over federation.) + """ + room_id = self.helper.create_room_as( + self.alice, is_public=True, tok=self.alice_tok + ) + self.get_success( + event_injection.inject_member_event( + self.hs, + room_id, + "@bruce:remote", + "join", + "@bruce:remote", + extra_content={ + "displayname": "Bruce!", + "avatar_url": "mxc://remote/123", + }, + ) + ) + # Sending this event makes the streams move forward after the injection... 
+ self.helper.send(room_id, "Test", tok=self.alice_tok) + self.pump(0.1) + + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertEqual( + profiles.get("@bruce:remote"), + ProfileInfo(display_name="Bruce!", avatar_url="mxc://remote/123"), + ) + + def test_private_rooms_do_not_have_profiles_collected(self) -> None: + """ + In a private room, member state events are not pulled out and used to populate + the user directory. + """ + room_id = self.helper.create_room_as( + self.alice, is_public=False, tok=self.alice_tok + ) + self.get_success( + event_injection.inject_member_event( + self.hs, + room_id, + "@bruce:remote", + "join", + "@bruce:remote", + extra_content={ + "displayname": "super-duper bruce", + "avatar_url": "mxc://remote/456", + }, + ) + ) + # Sending this event makes the streams move forward after the injection... + self.helper.send(room_id, "Test", tok=self.alice_tok) + self.pump(0.1) + + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertNotIn("@bruce:remote", profiles) + + def test_private_rooms_have_profiles_requested(self) -> None: + """ + When a name changes in a private room, the homeserver instead requests + the user's global profile over federation. + """ + + async def get_remote_profile( + user_id: str, ignore_backoff: bool = True + ) -> JsonDict: + if user_id == "@bruce:remote": + return { + "displayname": "Sir Bruce Bruceson", + "avatar_url": "mxc://remote/789", + } + else: + raise ValueError(f"unable to fetch {user_id}") + + with patch.object(self.profile_handler, "get_profile", get_remote_profile): + # Continue from the earlier test... + self.test_private_rooms_do_not_have_profiles_collected() + + # Advance by a minute + self.reactor.advance(61.0) + + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertEqual( + profiles.get("@bruce:remote"), + ProfileInfo( + display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789" + ), + ) + + def test_profile_requests_are_retried(self) -> None: + """ + When we fail to fetch the user's profile over federation, + we try again later. + """ + has_failed_once = False + + async def get_remote_profile( + user_id: str, ignore_backoff: bool = True + ) -> JsonDict: + nonlocal has_failed_once + if user_id == "@bruce:remote": + if not has_failed_once: + has_failed_once = True + raise SynapseError(502, "temporary network problem") + + return { + "displayname": "Sir Bruce Bruceson", + "avatar_url": "mxc://remote/789", + } + else: + raise ValueError(f"unable to fetch {user_id}") + + with patch.object(self.profile_handler, "get_profile", get_remote_profile): + # Continue from the earlier test... + self.test_private_rooms_do_not_have_profiles_collected() + + # Advance by a minute + self.reactor.advance(61.0) + + # The request has already failed once + self.assertTrue(has_failed_once) + + # The profile has yet to be updated. 
+ profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertNotIn( + "@bruce:remote", + profiles, + ) + + # Advance by five minutes, after the backoff has finished + self.reactor.advance(301.0) + + # The profile should have been updated now + profiles = self.get_success( + self.user_dir_helper.get_profiles_in_user_directory() + ) + self.assertEqual( + profiles.get("@bruce:remote"), + ProfileInfo( + display_name="Sir Bruce Bruceson", avatar_url="mxc://remote/789" + ), + ) From b0a0fb5c97449720c679045f1bb5a5f393b1c267 Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Thu, 16 Mar 2023 16:00:03 +0200 Subject: [PATCH 321/344] Implement MSC2659: application service ping endpoint (#15249) Signed-off-by: Tulir Asokan --- changelog.d/15249.feature | 1 + synapse/api/errors.py | 5 ++ synapse/appservice/api.py | 13 +++ synapse/config/experimental.py | 3 + synapse/rest/__init__.py | 2 + synapse/rest/client/appservice_ping.py | 115 +++++++++++++++++++++++++ synapse/rest/client/versions.py | 2 + 7 files changed, 141 insertions(+) create mode 100644 changelog.d/15249.feature create mode 100644 synapse/rest/client/appservice_ping.py diff --git a/changelog.d/15249.feature b/changelog.d/15249.feature new file mode 100644 index 000000000000..92d48a208723 --- /dev/null +++ b/changelog.d/15249.feature @@ -0,0 +1 @@ +Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. diff --git a/synapse/api/errors.py b/synapse/api/errors.py index e1737de59b51..8c6822f3c6ea 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -108,6 +108,11 @@ class Codes(str, Enum): USER_AWAITING_APPROVAL = "ORG.MATRIX.MSC3866_USER_AWAITING_APPROVAL" + AS_PING_URL_NOT_SET = "FI.MAU.MSC2659_URL_NOT_SET" + AS_PING_BAD_STATUS = "FI.MAU.MSC2659_BAD_STATUS" + AS_PING_CONNECTION_TIMEOUT = "FI.MAU.MSC2659_CONNECTION_TIMEOUT" + AS_PING_CONNECTION_FAILED = "FI.MAU.MSC2659_CONNECTION_FAILED" + # Attempt to send a second annotation with the same event type & annotation key # MSC2677 DUPLICATE_ANNOTATION = "M_DUPLICATE_ANNOTATION" diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 1a6f69e7d3f3..4812fb44961f 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -266,6 +266,19 @@ async def _get() -> Optional[JsonDict]: key = (service.id, protocol) return await self.protocol_meta_cache.wrap(key, _get) + async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> None: + # The caller should check that url is set + assert service.url is not None, "ping called without URL being set" + + # This is required by the configuration. 
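# Editorial note, not part of the patch: the call below amounts to, roughly,
#   POST <service.url>/_matrix/app/unstable/fi.mau.msc2659/ping
#   Authorization: Bearer <hs_token>
#   {"transaction_id": <txn_id or null>}
# which is why the hs_token (mandatory in the appservice registration file) is
# asserted next before being used.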
+ assert service.hs_token is not None + + await self.post_json_get_json( + uri=service.url + "/_matrix/app/unstable/fi.mau.msc2659/ping", + post_json={"transaction_id": txn_id}, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, + ) + async def push_bulk( self, service: "ApplicationService", diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 7e05f78f70a2..99dcd27c7426 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -178,3 +178,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3967: Do not require UIA when first uploading cross signing keys self.msc3967_enabled = experimental.get("msc3967_enabled", False) + + # MSC2659: Application service ping endpoint + self.msc2659_enabled = experimental.get("msc2659_enabled", False) diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index 2e19e055d3aa..55b448adfdba 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -20,6 +20,7 @@ account, account_data, account_validity, + appservice_ping, auth, capabilities, devices, @@ -140,6 +141,7 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: if is_main_process: password_policy.register_servlets(hs, client_resource) knock.register_servlets(hs, client_resource) + appservice_ping.register_servlets(hs, client_resource) # moving to /_synapse/admin if is_main_process: diff --git a/synapse/rest/client/appservice_ping.py b/synapse/rest/client/appservice_ping.py new file mode 100644 index 000000000000..31466a4ad4cb --- /dev/null +++ b/synapse/rest/client/appservice_ping.py @@ -0,0 +1,115 @@ +# Copyright 2023 Tulir Asokan +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
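# Editorial sketch, not part of the patch: how an application service might call the
# endpoint added by the servlet below. The path parameter, the optional transaction_id
# field and the {"duration": <ms>} response come from this file; the homeserver URL,
# appservice ID and token are invented, the /_matrix/client/unstable prefix is assumed
# from client_patterns(unstable=True), and `requests` is used purely for illustration.

import requests

resp = requests.post(
    "https://synapse.example.com/_matrix/client/unstable"
    "/fi.mau.msc2659/appservice/my_bridge/ping",
    headers={"Authorization": "Bearer AS_TOKEN"},  # the appservice's own token
    json={"transaction_id": "ping-1"},  # optional; relayed to the appservice
)
resp.raise_for_status()
print(resp.json())  # e.g. {"duration": 123}: the HS->AS round-trip time in ms
# Note: the homeserver only registers this servlet when the experimental
# msc2659_enabled option is set (see register_servlets below).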
+ +import logging +import time +from http import HTTPStatus +from typing import TYPE_CHECKING, Any, Dict, Tuple + +from synapse.api.errors import ( + CodeMessageException, + Codes, + HttpResponseException, + SynapseError, +) +from synapse.http import RequestTimedOutError +from synapse.http.server import HttpServer +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.site import SynapseRequest +from synapse.types import JsonDict + +from ._base import client_patterns + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class AppservicePingRestServlet(RestServlet): + PATTERNS = client_patterns( + "/fi.mau.msc2659/appservice/(?P[^/]*)/ping", + unstable=True, + releases=(), + ) + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.as_api = hs.get_application_service_api() + self.auth = hs.get_auth() + + async def on_POST( + self, request: SynapseRequest, appservice_id: str + ) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + + if not requester.app_service: + raise SynapseError( + HTTPStatus.FORBIDDEN, + "Only application services can use the /appservice/ping endpoint", + Codes.FORBIDDEN, + ) + elif requester.app_service.id != appservice_id: + raise SynapseError( + HTTPStatus.FORBIDDEN, + "Mismatching application service ID in path", + Codes.FORBIDDEN, + ) + elif not requester.app_service.url: + raise SynapseError( + HTTPStatus.BAD_REQUEST, + "The application service does not have a URL set", + Codes.AS_PING_URL_NOT_SET, + ) + + content = parse_json_object_from_request(request) + txn_id = content.get("transaction_id", None) + + start = time.monotonic() + try: + await self.as_api.ping(requester.app_service, txn_id) + except RequestTimedOutError as e: + raise SynapseError( + HTTPStatus.GATEWAY_TIMEOUT, + e.msg, + Codes.AS_PING_CONNECTION_TIMEOUT, + ) + except CodeMessageException as e: + additional_fields: Dict[str, Any] = {"status": e.code} + if isinstance(e, HttpResponseException): + try: + additional_fields["body"] = e.response.decode("utf-8") + except UnicodeDecodeError: + pass + raise SynapseError( + HTTPStatus.BAD_GATEWAY, + f"HTTP {e.code} {e.msg}", + Codes.AS_PING_BAD_STATUS, + additional_fields=additional_fields, + ) + except Exception as e: + raise SynapseError( + HTTPStatus.BAD_GATEWAY, + f"{type(e).__name__}: {e}", + Codes.AS_PING_CONNECTION_FAILED, + ) + + duration = time.monotonic() - start + + return HTTPStatus.OK, {"duration": int(duration * 1000)} + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.experimental.msc2659_enabled: + AppservicePingRestServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index e19c0946c035..dba0f0891ac6 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -109,6 +109,8 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "org.matrix.msc3773": self.config.experimental.msc3773_enabled, # Allows moderators to fetch redacted event content as described in MSC2815 "fi.mau.msc2815": self.config.experimental.msc2815_enabled, + # Adds a ping endpoint for appservices to check HS->AS connection + "fi.mau.msc2659": self.config.experimental.msc2659_enabled, # Adds support for login token requests as per MSC3882 "org.matrix.msc3882": self.config.experimental.msc3882_enabled, # Adds support for remotely enabling/disabling pushers, as per MSC3881 From 
afb216c202feb143ce70c74c16fa50ca93da6157 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Thu, 16 Mar 2023 11:13:30 -0400 Subject: [PATCH 322/344] Remove no-op send_command for Redis replication. (#15274) With Redis commands do not need to be re-issued by the main process (they fan-out to all processes at once) and thus it is no longer necessary to worry about them reflecting recursively forever. --- changelog.d/15272.misc | 2 +- changelog.d/15274.misc | 1 + synapse/replication/tcp/handler.py | 26 +------- .../replication/tcp/test_remote_server_up.py | 63 ------------------- 4 files changed, 3 insertions(+), 89 deletions(-) create mode 100644 changelog.d/15274.misc delete mode 100644 tests/replication/tcp/test_remote_server_up.py diff --git a/changelog.d/15272.misc b/changelog.d/15272.misc index 7a3ef323e9aa..f7c0276ecc22 100644 --- a/changelog.d/15272.misc +++ b/changelog.d/15272.misc @@ -1 +1 @@ -Remove unused class `DirectTcpReplicationClientFactory`. +Clean-up direct TCP replication code. diff --git a/changelog.d/15274.misc b/changelog.d/15274.misc new file mode 100644 index 000000000000..f7c0276ecc22 --- /dev/null +++ b/changelog.d/15274.misc @@ -0,0 +1 @@ +Clean-up direct TCP replication code. diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index d03a53d76429..2290b3e6fe7d 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -625,23 +625,6 @@ def on_REMOTE_SERVER_UP( self._notifier.notify_remote_server_up(cmd.data) - # We relay to all other connections to ensure every instance gets the - # notification. - # - # When configured to use redis we'll always only have one connection and - # so this is a no-op (all instances will have already received the same - # REMOTE_SERVER_UP command). - # - # For direct TCP connections this will relay to all other connections - # connected to us. When on master this will correctly fan out to all - # other direct TCP clients and on workers there'll only be the one - # connection to master. - # - # (The logic here should also be sound if we have a mix of Redis and - # direct TCP connections so long as there is only one traffic route - # between two instances, but that is not currently supported). - self.send_command(cmd, ignore_conn=conn) - def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" self._connections.append(connection) @@ -689,21 +672,14 @@ def connected(self) -> bool: """ return bool(self._connections) - def send_command( - self, cmd: Command, ignore_conn: Optional[IReplicationConnection] = None - ) -> None: + def send_command(self, cmd: Command) -> None: """Send a command to all connected connections. Args: cmd - ignore_conn: If set don't send command to the given connection. - Used when relaying commands from one connection to all others. """ if self._connections: for connection in self._connections: - if connection == ignore_conn: - continue - try: connection.send_command(cmd) except Exception: diff --git a/tests/replication/tcp/test_remote_server_up.py b/tests/replication/tcp/test_remote_server_up.py deleted file mode 100644 index b75fc05fd55b..000000000000 --- a/tests/replication/tcp/test_remote_server_up.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright 2020 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from typing import Tuple - -from twisted.internet.address import IPv4Address -from twisted.internet.interfaces import IProtocol -from twisted.test.proto_helpers import MemoryReactor, StringTransport - -from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory -from synapse.server import HomeServer -from synapse.util import Clock - -from tests.unittest import HomeserverTestCase - - -class RemoteServerUpTestCase(HomeserverTestCase): - def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.factory = ReplicationStreamProtocolFactory(hs) - - def _make_client(self) -> Tuple[IProtocol, StringTransport]: - """Create a new direct TCP replication connection""" - - proto = self.factory.buildProtocol(IPv4Address("TCP", "127.0.0.1", 0)) - transport = StringTransport() - proto.makeConnection(transport) - - # We can safely ignore the commands received during connection. - self.pump() - transport.clear() - - return proto, transport - - def test_relay(self) -> None: - """Test that Synapse will relay REMOTE_SERVER_UP commands to all - other connections, but not the one that sent it. - """ - - proto1, transport1 = self._make_client() - - # We shouldn't receive an echo. - proto1.dataReceived(b"REMOTE_SERVER_UP example.com\n") - self.pump() - self.assertEqual(transport1.value(), b"") - - # But we should see an echo if we connect another client - proto2, transport2 = self._make_client() - proto1.dataReceived(b"REMOTE_SERVER_UP example.com\n") - - self.pump() - self.assertEqual(transport1.value(), b"") - self.assertEqual(transport2.value(), b"REMOTE_SERVER_UP example.com\n") From 66fc166b966204d47ac438982a3a41137f614c4c Mon Sep 17 00:00:00 2001 From: reivilibre Date: Fri, 17 Mar 2023 13:02:30 +0000 Subject: [PATCH 323/344] Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. (#15275) --- changelog.d/15275.misc | 1 + docker/configure_workers_and_start.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15275.misc diff --git a/changelog.d/15275.misc b/changelog.d/15275.misc new file mode 100644 index 000000000000..b222acd72b4b --- /dev/null +++ b/changelog.d/15275.misc @@ -0,0 +1 @@ +Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. 
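Editorial aside, not part of the patch: the one-line fix below replaces the
`Dict[str, Any] | None` annotation with `typing.Optional[Dict[str, Any]]`. The
`X | Y` union spelling is PEP 604 and, when annotations are evaluated at function
definition time, needs Python 3.10 or newer, whereas Optional also works on the
older interpreters the Complement images run. A minimal illustration, assuming
nothing beyond the standard library:

    from typing import Any, Dict, Optional

    def merge_example(existing: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        # Same meaning as `existing: Dict[str, Any] | None`, but evaluable on
        # Python < 3.10 without `from __future__ import annotations`.
        return dict(existing or {})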
\ No newline at end of file diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index cfb16c2e2240..376f9ed6354a 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -422,7 +422,7 @@ def add_worker_roles_to_shared_config( def merge_worker_template_configs( - existing_dict: Dict[str, Any] | None, + existing_dict: Optional[Dict[str, Any]], to_be_merged_dict: Dict[str, Any], ) -> Dict[str, Any]: """When given an existing dict of worker template configuration consisting with both From 3d70cc393fb32235bbeb94a0b97691dff5531f4d Mon Sep 17 00:00:00 2001 From: Jason Little Date: Fri, 17 Mar 2023 08:50:31 -0500 Subject: [PATCH 324/344] Load `/register/available` endpoint on workers (#15268) --- changelog.d/15268.feature | 1 + docker/configure_workers_and_start.py | 1 + docs/workers.md | 1 + synapse/rest/client/register.py | 2 +- 4 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15268.feature diff --git a/changelog.d/15268.feature b/changelog.d/15268.feature new file mode 100644 index 000000000000..5f1f1a0f589c --- /dev/null +++ b/changelog.d/15268.feature @@ -0,0 +1 @@ +Allow loading `/register/available` endpoint on workers. diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index 376f9ed6354a..3f2f5c2daf85 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -163,6 +163,7 @@ "^/_matrix/client/versions$", "^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$", "^/_matrix/client/(r0|v3|unstable)/register$", + "^/_matrix/client/(r0|v3|unstable)/register/available$", "^/_matrix/client/(r0|v3|unstable)/auth/.*/fallback/web$", "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$", "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event", diff --git a/docs/workers.md b/docs/workers.md index e0e99b44539e..bf7690f5aff2 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -245,6 +245,7 @@ information. 
# Registration/login requests ^/_matrix/client/(api/v1|r0|v3|unstable)/login$ ^/_matrix/client/(r0|v3|unstable)/register$ + ^/_matrix/client/(r0|v3|unstable)/register/available$ ^/_matrix/client/v1/register/m.login.registration_token/validity$ # Event sending requests diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index bce806f2bbb4..4adb5271d22f 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -956,7 +956,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: if hs.config.worker.worker_app is None: EmailRegisterRequestTokenRestServlet(hs).register(http_server) MsisdnRegisterRequestTokenRestServlet(hs).register(http_server) - UsernameAvailabilityRestServlet(hs).register(http_server) RegistrationSubmitTokenServlet(hs).register(http_server) + UsernameAvailabilityRestServlet(hs).register(http_server) RegistrationTokenValidityRestServlet(hs).register(http_server) RegisterRestServlet(hs).register(http_server) From 14d8d416589239d51b798ee4a338aca09437f860 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 07:27:58 -0400 Subject: [PATCH 325/344] Bump types-requests from 2.28.11.12 to 2.28.11.15 (#15291) --- changelog.d/15291.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15291.misc diff --git a/changelog.d/15291.misc b/changelog.d/15291.misc new file mode 100644 index 000000000000..74de7a2065af --- /dev/null +++ b/changelog.d/15291.misc @@ -0,0 +1 @@ +Bump types-requests from 2.28.11.12 to 2.28.11.15. diff --git a/poetry.lock b/poetry.lock index 518fa5874be9..a89994eb8fb4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2684,14 +2684,14 @@ files = [ [[package]] name = "types-requests" -version = "2.28.11.12" +version = "2.28.11.15" description = "Typing stubs for requests" category = "dev" optional = false python-versions = "*" files = [ - {file = "types-requests-2.28.11.12.tar.gz", hash = "sha256:fd530aab3fc4f05ee36406af168f0836e6f00f1ee51a0b96b7311f82cb675230"}, - {file = "types_requests-2.28.11.12-py3-none-any.whl", hash = "sha256:dbc2933635860e553ffc59f5e264264981358baffe6342b925e3eb8261f866ee"}, + {file = "types-requests-2.28.11.15.tar.gz", hash = "sha256:fc8eaa09cc014699c6b63c60c2e3add0c8b09a410c818b5ac6e65f92a26dde09"}, + {file = "types_requests-2.28.11.15-py3-none-any.whl", hash = "sha256:a05e4c7bc967518fba5789c341ea8b0c942776ee474c7873129a61161978e586"}, ] [package.dependencies] From 2cfa6a30018891e634fd79b8f9f6aafe2ffd6754 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 07:31:22 -0400 Subject: [PATCH 326/344] Bump txredisapi from 1.4.7 to 1.4.9 (#15289) --- changelog.d/15289.misc | 1 + poetry.lock | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/15289.misc diff --git a/changelog.d/15289.misc b/changelog.d/15289.misc new file mode 100644 index 000000000000..1a3921af8417 --- /dev/null +++ b/changelog.d/15289.misc @@ -0,0 +1 @@ +Bump txredisapi from 1.4.7 to 1.4.9. 
diff --git a/poetry.lock b/poetry.lock index a89994eb8fb4..b65f5a1c3eb0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2523,14 +2523,14 @@ files = [ [[package]] name = "txredisapi" -version = "1.4.7" +version = "1.4.9" description = "non-blocking redis client for python" category = "main" optional = true python-versions = "*" files = [ - {file = "txredisapi-1.4.7-py3-none-any.whl", hash = "sha256:34c9eba8d34f452d30661f073b67b8cd42b695e3d31678ec1bbf628a65a0f059"}, - {file = "txredisapi-1.4.7.tar.gz", hash = "sha256:e6cc43f51e35d608abdca8f8c7d20e148fe1d82679f6e584baea613ebec812bb"}, + {file = "txredisapi-1.4.9-py3-none-any.whl", hash = "sha256:72e6ad09cc5fffe3bec2e55e5bfb74407bd357565fc212e6003f7e26ef7d8f78"}, + {file = "txredisapi-1.4.9.tar.gz", hash = "sha256:c9607062d05e4d0b8ef84719eb76a3fe7d5ccd606a2acf024429da51d6e84559"}, ] [package.dependencies] From 1870b44d2332ef2a567c7fa32be538cc938d5ef8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 07:32:49 -0400 Subject: [PATCH 327/344] Bump pydantic from 1.10.4 to 1.10.6 (#15286) --- changelog.d/15286.misc | 1 + poetry.lock | 74 +++++++++++++++++++++--------------------- 2 files changed, 38 insertions(+), 37 deletions(-) create mode 100644 changelog.d/15286.misc diff --git a/changelog.d/15286.misc b/changelog.d/15286.misc new file mode 100644 index 000000000000..48a5b400be41 --- /dev/null +++ b/changelog.d/15286.misc @@ -0,0 +1 @@ +Bump pydantic from 1.10.4 to 1.10.6. diff --git a/poetry.lock b/poetry.lock index b65f5a1c3eb0..9185d79cf73a 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1568,48 +1568,48 @@ files = [ [[package]] name = "pydantic" -version = "1.10.4" +version = "1.10.6" description = "Data validation and settings management using python type hints" category = "main" optional = false python-versions = ">=3.7" files = [ - {file = "pydantic-1.10.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5635de53e6686fe7a44b5cf25fcc419a0d5e5c1a1efe73d49d48fe7586db854"}, - {file = "pydantic-1.10.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6dc1cc241440ed7ca9ab59d9929075445da6b7c94ced281b3dd4cfe6c8cff817"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51bdeb10d2db0f288e71d49c9cefa609bca271720ecd0c58009bd7504a0c464c"}, - {file = "pydantic-1.10.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78cec42b95dbb500a1f7120bdf95c401f6abb616bbe8785ef09887306792e66e"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8775d4ef5e7299a2f4699501077a0defdaac5b6c4321173bcb0f3c496fbadf85"}, - {file = "pydantic-1.10.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:572066051eeac73d23f95ba9a71349c42a3e05999d0ee1572b7860235b850cc6"}, - {file = "pydantic-1.10.4-cp310-cp310-win_amd64.whl", hash = "sha256:7feb6a2d401f4d6863050f58325b8d99c1e56f4512d98b11ac64ad1751dc647d"}, - {file = "pydantic-1.10.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:39f4a73e5342b25c2959529f07f026ef58147249f9b7431e1ba8414a36761f53"}, - {file = "pydantic-1.10.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:983e720704431a6573d626b00662eb78a07148c9115129f9b4351091ec95ecc3"}, - {file = "pydantic-1.10.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75d52162fe6b2b55964fbb0af2ee58e99791a3138588c482572bb6087953113a"}, - {file = 
"pydantic-1.10.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fdf8d759ef326962b4678d89e275ffc55b7ce59d917d9f72233762061fd04a2d"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05a81b006be15655b2a1bae5faa4280cf7c81d0e09fcb49b342ebf826abe5a72"}, - {file = "pydantic-1.10.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d88c4c0e5c5dfd05092a4b271282ef0588e5f4aaf345778056fc5259ba098857"}, - {file = "pydantic-1.10.4-cp311-cp311-win_amd64.whl", hash = "sha256:6a05a9db1ef5be0fe63e988f9617ca2551013f55000289c671f71ec16f4985e3"}, - {file = "pydantic-1.10.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:887ca463c3bc47103c123bc06919c86720e80e1214aab79e9b779cda0ff92a00"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdf88ab63c3ee282c76d652fc86518aacb737ff35796023fae56a65ced1a5978"}, - {file = "pydantic-1.10.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a48f1953c4a1d9bd0b5167ac50da9a79f6072c63c4cef4cf2a3736994903583e"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a9f2de23bec87ff306aef658384b02aa7c32389766af3c5dee9ce33e80222dfa"}, - {file = "pydantic-1.10.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:cd8702c5142afda03dc2b1ee6bc358b62b3735b2cce53fc77b31ca9f728e4bc8"}, - {file = "pydantic-1.10.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6e7124d6855b2780611d9f5e1e145e86667eaa3bd9459192c8dc1a097f5e9903"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b53e1d41e97063d51a02821b80538053ee4608b9a181c1005441f1673c55423"}, - {file = "pydantic-1.10.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55b1625899acd33229c4352ce0ae54038529b412bd51c4915349b49ca575258f"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:301d626a59edbe5dfb48fcae245896379a450d04baeed50ef40d8199f2733b06"}, - {file = "pydantic-1.10.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b6f9d649892a6f54a39ed56b8dfd5e08b5f3be5f893da430bed76975f3735d15"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:d7b5a3821225f5c43496c324b0d6875fde910a1c2933d726a743ce328fbb2a8c"}, - {file = "pydantic-1.10.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f2f7eb6273dd12472d7f218e1fef6f7c7c2f00ac2e1ecde4db8824c457300416"}, - {file = "pydantic-1.10.4-cp38-cp38-win_amd64.whl", hash = "sha256:4b05697738e7d2040696b0a66d9f0a10bec0efa1883ca75ee9e55baf511909d6"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a9a6747cac06c2beb466064dda999a13176b23535e4c496c9d48e6406f92d42d"}, - {file = "pydantic-1.10.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:eb992a1ef739cc7b543576337bebfc62c0e6567434e522e97291b251a41dad7f"}, - {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:990406d226dea0e8f25f643b370224771878142155b879784ce89f633541a024"}, - {file = "pydantic-1.10.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e82a6d37a95e0b1b42b82ab340ada3963aea1317fd7f888bb6b9dfbf4fff57c"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9193d4f4ee8feca58bc56c8306bcb820f5c7905fd919e0750acdeeeef0615b28"}, - {file = "pydantic-1.10.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = 
"sha256:2b3ce5f16deb45c472dde1a0ee05619298c864a20cded09c4edd820e1454129f"}, - {file = "pydantic-1.10.4-cp39-cp39-win_amd64.whl", hash = "sha256:9cbdc268a62d9a98c56e2452d6c41c0263d64a2009aac69246486f01b4f594c4"}, - {file = "pydantic-1.10.4-py3-none-any.whl", hash = "sha256:4948f264678c703f3877d1c8877c4e3b2e12e549c57795107f08cf70c6ec7774"}, - {file = "pydantic-1.10.4.tar.gz", hash = "sha256:b9a3859f24eb4e097502a3be1fb4b2abb79b6103dd9e2e0edb70613a4459a648"}, + {file = "pydantic-1.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f9289065611c48147c1dd1fd344e9d57ab45f1d99b0fb26c51f1cf72cd9bcd31"}, + {file = "pydantic-1.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c32b6bba301490d9bb2bf5f631907803135e8085b6aa3e5fe5a770d46dd0160"}, + {file = "pydantic-1.10.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd9b9e98068fa1068edfc9eabde70a7132017bdd4f362f8b4fd0abed79c33083"}, + {file = "pydantic-1.10.6-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c84583b9df62522829cbc46e2b22e0ec11445625b5acd70c5681ce09c9b11c4"}, + {file = "pydantic-1.10.6-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:b41822064585fea56d0116aa431fbd5137ce69dfe837b599e310034171996084"}, + {file = "pydantic-1.10.6-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:61f1f08adfaa9cc02e0cbc94f478140385cbd52d5b3c5a657c2fceb15de8d1fb"}, + {file = "pydantic-1.10.6-cp310-cp310-win_amd64.whl", hash = "sha256:32937835e525d92c98a1512218db4eed9ddc8f4ee2a78382d77f54341972c0e7"}, + {file = "pydantic-1.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bbd5c531b22928e63d0cb1868dee76123456e1de2f1cb45879e9e7a3f3f1779b"}, + {file = "pydantic-1.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e277bd18339177daa62a294256869bbe84df1fb592be2716ec62627bb8d7c81d"}, + {file = "pydantic-1.10.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f15277d720aa57e173954d237628a8d304896364b9de745dcb722f584812c7"}, + {file = "pydantic-1.10.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b243b564cea2576725e77aeeda54e3e0229a168bc587d536cd69941e6797543d"}, + {file = "pydantic-1.10.6-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3ce13a558b484c9ae48a6a7c184b1ba0e5588c5525482681db418268e5f86186"}, + {file = "pydantic-1.10.6-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3ac1cd4deed871dfe0c5f63721e29debf03e2deefa41b3ed5eb5f5df287c7b70"}, + {file = "pydantic-1.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:b1eb6610330a1dfba9ce142ada792f26bbef1255b75f538196a39e9e90388bf4"}, + {file = "pydantic-1.10.6-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4ca83739c1263a044ec8b79df4eefc34bbac87191f0a513d00dd47d46e307a65"}, + {file = "pydantic-1.10.6-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea4e2a7cb409951988e79a469f609bba998a576e6d7b9791ae5d1e0619e1c0f2"}, + {file = "pydantic-1.10.6-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53de12b4608290992a943801d7756f18a37b7aee284b9ffa794ee8ea8153f8e2"}, + {file = "pydantic-1.10.6-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:60184e80aac3b56933c71c48d6181e630b0fbc61ae455a63322a66a23c14731a"}, + {file = "pydantic-1.10.6-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:415a3f719ce518e95a92effc7ee30118a25c3d032455d13e121e3840985f2efd"}, + {file = "pydantic-1.10.6-cp37-cp37m-win_amd64.whl", hash = 
"sha256:72cb30894a34d3a7ab6d959b45a70abac8a2a93b6480fc5a7bfbd9c935bdc4fb"}, + {file = "pydantic-1.10.6-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3091d2eaeda25391405e36c2fc2ed102b48bac4b384d42b2267310abae350ca6"}, + {file = "pydantic-1.10.6-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:751f008cd2afe812a781fd6aa2fb66c620ca2e1a13b6a2152b1ad51553cb4b77"}, + {file = "pydantic-1.10.6-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12e837fd320dd30bd625be1b101e3b62edc096a49835392dcf418f1a5ac2b832"}, + {file = "pydantic-1.10.6-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:587d92831d0115874d766b1f5fddcdde0c5b6c60f8c6111a394078ec227fca6d"}, + {file = "pydantic-1.10.6-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:476f6674303ae7965730a382a8e8d7fae18b8004b7b69a56c3d8fa93968aa21c"}, + {file = "pydantic-1.10.6-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:3a2be0a0f32c83265fd71a45027201e1278beaa82ea88ea5b345eea6afa9ac7f"}, + {file = "pydantic-1.10.6-cp38-cp38-win_amd64.whl", hash = "sha256:0abd9c60eee6201b853b6c4be104edfba4f8f6c5f3623f8e1dba90634d63eb35"}, + {file = "pydantic-1.10.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6195ca908045054dd2d57eb9c39a5fe86409968b8040de8c2240186da0769da7"}, + {file = "pydantic-1.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:43cdeca8d30de9a897440e3fb8866f827c4c31f6c73838e3a01a14b03b067b1d"}, + {file = "pydantic-1.10.6-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c19eb5163167489cb1e0161ae9220dadd4fc609a42649e7e84a8fa8fff7a80f"}, + {file = "pydantic-1.10.6-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:012c99a9c0d18cfde7469aa1ebff922e24b0c706d03ead96940f5465f2c9cf62"}, + {file = "pydantic-1.10.6-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:528dcf7ec49fb5a84bf6fe346c1cc3c55b0e7603c2123881996ca3ad79db5bfc"}, + {file = "pydantic-1.10.6-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:163e79386c3547c49366e959d01e37fc30252285a70619ffc1b10ede4758250a"}, + {file = "pydantic-1.10.6-cp39-cp39-win_amd64.whl", hash = "sha256:189318051c3d57821f7233ecc94708767dd67687a614a4e8f92b4a020d4ffd06"}, + {file = "pydantic-1.10.6-py3-none-any.whl", hash = "sha256:acc6783751ac9c9bc4680379edd6d286468a1dc8d7d9906cd6f1186ed682b2b0"}, + {file = "pydantic-1.10.6.tar.gz", hash = "sha256:cf95adb0d1671fc38d8c43dd921ad5814a735e7d9b4d9e437c088002863854fd"}, ] [package.dependencies] From 099b69fb1c538387a003076b118a4813e8b776f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 07:37:46 -0400 Subject: [PATCH 328/344] Bump anyhow from 1.0.69 to 1.0.70 (#15288) --- Cargo.lock | 4 ++-- changelog.d/15288.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15288.misc diff --git a/Cargo.lock b/Cargo.lock index 025d22b9d429..ef6735bfc4a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.69" +version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224afbd727c3d6e4b90103ece64b8d1b67fbb1973b1046c2281eed3f3803f800" +checksum = "7de8ce5e0f9f8d88245311066a578d72b7af3e7088f32783804676302df237e4" [[package]] name = "arc-swap" diff --git a/changelog.d/15288.misc b/changelog.d/15288.misc new file mode 100644 index 000000000000..908b590595cd --- /dev/null +++ b/changelog.d/15288.misc @@ -0,0 +1 
@@ +Bump anyhow from 1.0.69 to 1.0.70. From eee26138fe23a0bb67399f3fe0571e3f168c8f1c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 07:38:14 -0400 Subject: [PATCH 329/344] Bump serde from 1.0.155 to 1.0.157 (#15287) --- Cargo.lock | 33 ++++++++++++++++++++++----------- changelog.d/15287.misc | 1 + 2 files changed, 23 insertions(+), 11 deletions(-) create mode 100644 changelog.d/15287.misc diff --git a/Cargo.lock b/Cargo.lock index ef6735bfc4a6..2bad27154391 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -185,9 +185,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.46" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" +checksum = "1d0e1ae9e836cc3beddd63db0df682593d7e2d3d891ae8c9083d2113e1744224" dependencies = [ "unicode-ident", ] @@ -250,7 +250,7 @@ dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn", + "syn 1.0.104", ] [[package]] @@ -261,7 +261,7 @@ checksum = "c8df9be978a2d2f0cdebabb03206ed73b11314701a5bfe71b0d753b81997777f" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 1.0.104", ] [[package]] @@ -276,9 +276,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.21" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" dependencies = [ "proc-macro2", ] @@ -323,22 +323,22 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "serde" -version = "1.0.155" +version = "1.0.157" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71f2b4817415c6d4210bfe1c7bfcf4801b2d904cb4d0e1a8fdb651013c9e86b8" +checksum = "707de5fcf5df2b5788fca98dd7eab490bc2fd9b7ef1404defc462833b83f25ca" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.155" +version = "1.0.157" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d071a94a3fac4aff69d023a7f411e33f40f3483f8c5190b1953822b6b76d7630" +checksum = "78997f4555c22a7971214540c4a661291970619afd56de19f77e0de86296e1e5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.2", ] [[package]] @@ -375,6 +375,17 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59d3276aee1fa0c33612917969b5172b5be2db051232a6e4826f1a1a9191b045" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "synapse" version = "0.1.0" diff --git a/changelog.d/15287.misc b/changelog.d/15287.misc new file mode 100644 index 000000000000..d5b80d108743 --- /dev/null +++ b/changelog.d/15287.misc @@ -0,0 +1 @@ +Bump serde from 1.0.155 to 1.0.157. 
From f75a041f5929b947b4abf0e30a31c885f2e01fd1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 20 Mar 2023 07:40:01 -0400 Subject: [PATCH 330/344] Bump pygithub from 1.57 to 1.58.1 (#15290) --- changelog.d/15290.misc | 1 + poetry.lock | 14 +++++++------- 2 files changed, 8 insertions(+), 7 deletions(-) create mode 100644 changelog.d/15290.misc diff --git a/changelog.d/15290.misc b/changelog.d/15290.misc new file mode 100644 index 000000000000..f23043874fb0 --- /dev/null +++ b/changelog.d/15290.misc @@ -0,0 +1 @@ +Bump pygithub from 1.57 to 1.58.1. diff --git a/poetry.lock b/poetry.lock index 9185d79cf73a..dc44e45a2cd2 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1621,25 +1621,22 @@ email = ["email-validator (>=1.0.3)"] [[package]] name = "pygithub" -version = "1.57" +version = "1.58.1" description = "Use the full Github API v3" category = "dev" optional = false python-versions = ">=3.7" files = [ - {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"}, - {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"}, + {file = "PyGithub-1.58.1-py3-none-any.whl", hash = "sha256:4e7fe9c3ec30d5fde5b4fbb97f18821c9dbf372bf6df337fe66f6689a65e0a83"}, + {file = "PyGithub-1.58.1.tar.gz", hash = "sha256:7d528b4ad92bc13122129fafd444ce3d04c47d2d801f6446b6e6ee2d410235b3"}, ] [package.dependencies] deprecated = "*" -pyjwt = ">=2.4.0" +pyjwt = {version = ">=2.4.0", extras = ["crypto"]} pynacl = ">=1.4.0" requests = ">=2.14.0" -[package.extras] -integrations = ["cryptography"] - [[package]] name = "pygments" version = "2.11.2" @@ -1675,6 +1672,9 @@ files = [ {file = "PyJWT-2.4.0.tar.gz", hash = "sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba"}, ] +[package.dependencies] +cryptography = {version = ">=3.3.1", optional = true, markers = "extra == \"crypto\""} + [package.extras] crypto = ["cryptography (>=3.3.1)"] dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.3.1)", "mypy", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"] From 25006acc1766ca51fba0d898b4613d2588cfffb8 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 20 Mar 2023 11:47:21 -0400 Subject: [PATCH 331/344] Add /versions flag for MSC3952. (#15293) --- changelog.d/15293.misc | 1 + synapse/rest/client/versions.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/15293.misc diff --git a/changelog.d/15293.misc b/changelog.d/15293.misc new file mode 100644 index 000000000000..57447956204a --- /dev/null +++ b/changelog.d/15293.misc @@ -0,0 +1 @@ +Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index dba0f0891ac6..ec171582b3d6 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -122,6 +122,8 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: is not None, # Adds support for relation-based redactions as per MSC3912. "org.matrix.msc3912": self.config.experimental.msc3912_enabled, + # Adds support for unstable "intentional mentions" behaviour. 
+ "org.matrix.msc3952_intentional_mentions": self.config.experimental.msc3952_intentional_mentions, }, }, ) From 63e25010d619868cd96000763f3733131ef0e6ef Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 20 Mar 2023 16:28:29 +0000 Subject: [PATCH 332/344] Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). (#15281) --- .github/workflows/docker.yml | 12 +++++++++++- changelog.d/15281.docker | 1 + 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15281.docker diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 4bbe5decf88e..602f5e1759d0 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -10,6 +10,7 @@ on: permissions: contents: read + packages: write jobs: build: @@ -34,11 +35,20 @@ jobs: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Log in to GHCR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Calculate docker image tag id: set-tag uses: docker/metadata-action@master with: - images: matrixdotorg/synapse + images: | + docker.io/matrixdotorg/synapse + ghcr.io/matrix-org/synapse flavor: | latest=false tags: | diff --git a/changelog.d/15281.docker b/changelog.d/15281.docker new file mode 100644 index 000000000000..6990430ef44d --- /dev/null +++ b/changelog.d/15281.docker @@ -0,0 +1 @@ +Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). \ No newline at end of file From 5ab7146e191c4160cbecf4c6dec6a0f2ed00e171 Mon Sep 17 00:00:00 2001 From: Shay Date: Mon, 20 Mar 2023 11:14:05 -0700 Subject: [PATCH 333/344] Add Synapse-Trace-Id to access-control-expose-headers header (#14974) --- changelog.d/14974.misc | 1 + synapse/http/server.py | 4 ++++ tests/test_server.py | 4 ++++ 3 files changed, 9 insertions(+) create mode 100644 changelog.d/14974.misc diff --git a/changelog.d/14974.misc b/changelog.d/14974.misc new file mode 100644 index 000000000000..05c5f0144427 --- /dev/null +++ b/changelog.d/14974.misc @@ -0,0 +1 @@ +Add `Synapse-Trace-Id` to `access-control-expose-headers` header. diff --git a/synapse/http/server.py b/synapse/http/server.py index 9314454af106..7b760505b25a 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -892,6 +892,10 @@ def set_cors_headers(request: SynapseRequest) -> None: b"Access-Control-Allow-Headers", b"X-Requested-With, Content-Type, Authorization, Date", ) + request.setHeader( + b"Access-Control-Expose-Headers", + b"Synapse-Trace-Id", + ) def set_corp_headers(request: Request) -> None: diff --git a/tests/test_server.py b/tests/test_server.py index d67d7722a485..e266c06a2cbf 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -266,6 +266,10 @@ def _check_cors_standard_headers(self, channel: FakeChannel) -> None: [b"X-Requested-With, Content-Type, Authorization, Date"], "has correct CORS Headers header", ) + self.assertEqual( + channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"), + [b"Synapse-Trace-Id"], + ) def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None: # Ensure the correct CORS headers have been added From a5fb382a29991c8eafcb8c54cdd8c7aab260c237 Mon Sep 17 00:00:00 2001 From: Patrick Cloke Date: Mon, 20 Mar 2023 14:32:26 -0400 Subject: [PATCH 334/344] Separate HTTP preview code and URL previewer. (#15269) Separates REST layer code from the actual URL previewing. 
--- changelog.d/15269.misc | 1 + synapse/media/url_previewer.py | 833 +++++++++++++++++++++ synapse/rest/media/preview_url_resource.py | 796 +------------------- tests/rest/media/test_url_preview.py | 34 +- 4 files changed, 854 insertions(+), 810 deletions(-) create mode 100644 changelog.d/15269.misc create mode 100644 synapse/media/url_previewer.py diff --git a/changelog.d/15269.misc b/changelog.d/15269.misc new file mode 100644 index 000000000000..b3126fb1f4c9 --- /dev/null +++ b/changelog.d/15269.misc @@ -0,0 +1 @@ +Reorganize URL preview code. diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py new file mode 100644 index 000000000000..c8a4a809f129 --- /dev/null +++ b/synapse/media/url_previewer.py @@ -0,0 +1,833 @@ +# Copyright 2016 OpenMarket Ltd +# Copyright 2020-2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import datetime +import errno +import fnmatch +import logging +import os +import re +import shutil +import sys +import traceback +from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple +from urllib.parse import urljoin, urlparse, urlsplit +from urllib.request import urlopen + +import attr + +from twisted.internet.defer import Deferred +from twisted.internet.error import DNSLookupError + +from synapse.api.errors import Codes, SynapseError +from synapse.http.client import SimpleHttpClient +from synapse.logging.context import make_deferred_yieldable, run_in_background +from synapse.media._base import FileInfo, get_filename_from_headers +from synapse.media.media_storage import MediaStorage +from synapse.media.oembed import OEmbedProvider +from synapse.media.preview_html import decode_body, parse_html_to_open_graph +from synapse.metrics.background_process_metrics import run_as_background_process +from synapse.types import JsonDict, UserID +from synapse.util import json_encoder +from synapse.util.async_helpers import ObservableDeferred +from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.stringutils import random_string + +if TYPE_CHECKING: + from synapse.media.media_repository import MediaRepository + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + +OG_TAG_NAME_MAXLEN = 50 +OG_TAG_VALUE_MAXLEN = 1000 + +ONE_HOUR = 60 * 60 * 1000 +ONE_DAY = 24 * ONE_HOUR +IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class DownloadResult: + length: int + uri: str + response_code: int + media_type: str + download_name: Optional[str] + expires: int + etag: Optional[str] + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class MediaInfo: + """ + Information parsed from downloading media being previewed. + """ + + # The Content-Type header of the response. + media_type: str + # The length (in bytes) of the downloaded media. + media_length: int + # The media filename, according to the server. This is parsed from the + # returned headers, if possible. + download_name: Optional[str] + # The time of the preview. 
+ created_ts_ms: int + # Information from the media storage provider about where the file is stored + # on disk. + filesystem_id: str + filename: str + # The URI being previewed. + uri: str + # The HTTP response code. + response_code: int + # The timestamp (in milliseconds) of when this preview expires. + expires: int + # The ETag header of the response. + etag: Optional[str] + + +class UrlPreviewer: + """ + Generates an Open Graph (https://ogp.me/) responses (with some Matrix + specific additions) for a given URL. + + When Synapse is asked to preview a URL it does the following: + + 1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the + config). + 2. Checks the URL against an in-memory cache and returns the result if it exists. (This + is also used to de-duplicate processing of multiple in-flight requests at once.) + 3. Kicks off a background process to generate a preview: + 1. Checks URL and timestamp against the database cache and returns the result if it + has not expired and was successful (a 2xx return code). + 2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it + does, update the URL to download. + 3. Downloads the URL and stores it into a file via the media storage provider + and saves the local media metadata. + 4. If the media is an image: + 1. Generates thumbnails. + 2. Generates an Open Graph response based on image properties. + 5. If the media is HTML: + 1. Decodes the HTML via the stored file. + 2. Generates an Open Graph response from the HTML. + 3. If a JSON oEmbed URL was found in the HTML via autodiscovery: + 1. Downloads the URL and stores it into a file via the media storage provider + and saves the local media metadata. + 2. Convert the oEmbed response to an Open Graph response. + 3. Override any Open Graph data from the HTML with data from oEmbed. + 4. If an image exists in the Open Graph response: + 1. Downloads the URL and stores it into a file via the media storage + provider and saves the local media metadata. + 2. Generates thumbnails. + 3. Updates the Open Graph response based on image properties. + 6. If the media is JSON and an oEmbed URL was found: + 1. Convert the oEmbed response to an Open Graph response. + 2. If a thumbnail or image is in the oEmbed response: + 1. Downloads the URL and stores it into a file via the media storage + provider and saves the local media metadata. + 2. Generates thumbnails. + 3. Updates the Open Graph response based on image properties. + 7. Stores the result in the database cache. + 4. Returns the result. + + If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or + image thumbnailing, step 5.4 or 6.4) fails then the URL preview as a whole + does not fail. As much information as possible is returned. + + The in-memory cache expires after 1 hour. + + Expired entries in the database cache (and their associated media files) are + deleted every 10 seconds. The default expiration time is 1 hour from download. 
+ """ + + def __init__( + self, + hs: "HomeServer", + media_repo: "MediaRepository", + media_storage: MediaStorage, + ): + self.clock = hs.get_clock() + self.filepaths = media_repo.filepaths + self.max_spider_size = hs.config.media.max_spider_size + self.server_name = hs.hostname + self.store = hs.get_datastores().main + self.client = SimpleHttpClient( + hs, + treq_args={"browser_like_redirects": True}, + ip_whitelist=hs.config.media.url_preview_ip_range_whitelist, + ip_blacklist=hs.config.media.url_preview_ip_range_blacklist, + use_proxy=True, + ) + self.media_repo = media_repo + self.primary_base_path = media_repo.primary_base_path + self.media_storage = media_storage + + self._oembed = OEmbedProvider(hs) + + # We run the background jobs if we're the instance specified (or no + # instance is specified, where we assume there is only one instance + # serving media). + instance_running_jobs = hs.config.media.media_instance_running_background_jobs + self._worker_run_media_background_jobs = ( + instance_running_jobs is None + or instance_running_jobs == hs.get_instance_name() + ) + + self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist + self.url_preview_accept_language = hs.config.media.url_preview_accept_language + + # memory cache mapping urls to an ObservableDeferred returning + # JSON-encoded OG metadata + self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache( + cache_name="url_previews", + clock=self.clock, + # don't spider URLs more often than once an hour + expiry_ms=ONE_HOUR, + ) + + if self._worker_run_media_background_jobs: + self._cleaner_loop = self.clock.looping_call( + self._start_expire_url_cache_data, 10 * 1000 + ) + + async def preview(self, url: str, user: UserID, ts: int) -> bytes: + # XXX: we could move this into _do_preview if we wanted. + url_tuple = urlsplit(url) + for entry in self.url_preview_url_blacklist: + match = True + for attrib in entry: + pattern = entry[attrib] + value = getattr(url_tuple, attrib) + logger.debug( + "Matching attrib '%s' with value '%s' against pattern '%s'", + attrib, + value, + pattern, + ) + + if value is None: + match = False + continue + + # Some attributes might not be parsed as strings by urlsplit (such as the + # port, which is parsed as an int). Because we use match functions that + # expect strings, we want to make sure that's what we give them. + value_str = str(value) + + if pattern.startswith("^"): + if not re.match(pattern, value_str): + match = False + continue + else: + if not fnmatch.fnmatch(value_str, pattern): + match = False + continue + if match: + logger.warning("URL %s blocked by url_blacklist entry %s", url, entry) + raise SynapseError( + 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN + ) + + # the in-memory cache: + # * ensures that only one request is active at a time + # * takes load off the DB for the thundering herds + # * also caches any failures (unlike the DB) so we don't keep + # requesting the same endpoint + + observable = self._cache.get(url) + + if not observable: + download = run_in_background(self._do_preview, url, user, ts) + observable = ObservableDeferred(download, consumeErrors=True) + self._cache[url] = observable + else: + logger.info("Returning cached response") + + return await make_deferred_yieldable(observable.observe()) + + async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: + """Check the db, and download the URL and build a preview + + Args: + url: The URL to preview. + user: The user requesting the preview. 
+ ts: The timestamp requested for the preview. + + Returns: + json-encoded og data + """ + # check the URL cache in the DB (which will also provide us with + # historical previews, if we have any) + cache_result = await self.store.get_url_cache(url, ts) + if ( + cache_result + and cache_result["expires_ts"] > ts + and cache_result["response_code"] / 100 == 2 + ): + # It may be stored as text in the database, not as bytes (such as + # PostgreSQL). If so, encode it back before handing it on. + og = cache_result["og"] + if isinstance(og, str): + og = og.encode("utf8") + return og + + # If this URL can be accessed via oEmbed, use that instead. + url_to_download = url + oembed_url = self._oembed.get_oembed_url(url) + if oembed_url: + url_to_download = oembed_url + + media_info = await self._handle_url(url_to_download, user) + + logger.debug("got media_info of '%s'", media_info) + + # The number of milliseconds that the response should be considered valid. + expiration_ms = media_info.expires + author_name: Optional[str] = None + + if _is_media(media_info.media_type): + file_id = media_info.filesystem_id + dims = await self.media_repo._generate_thumbnails( + None, file_id, file_id, media_info.media_type, url_cache=True + ) + + og = { + "og:description": media_info.download_name, + "og:image": f"mxc://{self.server_name}/{media_info.filesystem_id}", + "og:image:type": media_info.media_type, + "matrix:image:size": media_info.media_length, + } + + if dims: + og["og:image:width"] = dims["width"] + og["og:image:height"] = dims["height"] + else: + logger.warning("Couldn't get dims for %s" % url) + + # define our OG response for this media + elif _is_html(media_info.media_type): + # TODO: somehow stop a big HTML tree from exploding synapse's RAM + + with open(media_info.filename, "rb") as file: + body = file.read() + + tree = decode_body(body, media_info.uri, media_info.media_type) + if tree is not None: + # Check if this HTML document points to oEmbed information and + # defer to that. + oembed_url = self._oembed.autodiscover_from_html(tree) + og_from_oembed: JsonDict = {} + if oembed_url: + try: + oembed_info = await self._handle_url( + oembed_url, user, allow_data_urls=True + ) + except Exception as e: + # Fetching the oEmbed info failed, don't block the entire URL preview. + logger.warning( + "oEmbed fetch failed during URL preview: %s errored with %s", + oembed_url, + e, + ) + else: + ( + og_from_oembed, + author_name, + expiration_ms, + ) = await self._handle_oembed_response( + url, oembed_info, expiration_ms + ) + + # Parse Open Graph information from the HTML in case the oEmbed + # response failed or is incomplete. + og_from_html = parse_html_to_open_graph(tree) + + # Compile the Open Graph response by using the scraped + # information from the HTML and overlaying any information + # from the oEmbed response. + og = {**og_from_html, **og_from_oembed} + + await self._precache_image_url(user, media_info, og) + else: + og = {} + + elif oembed_url: + # Handle the oEmbed information. 
+ og, author_name, expiration_ms = await self._handle_oembed_response( + url, media_info, expiration_ms + ) + await self._precache_image_url(user, media_info, og) + + else: + logger.warning("Failed to find any OG data in %s", url) + og = {} + + # If we don't have a title but we have author_name, copy it as + # title + if not og.get("og:title") and author_name: + og["og:title"] = author_name + + # filter out any stupidly long values + keys_to_remove = [] + for k, v in og.items(): + # values can be numeric as well as strings, hence the cast to str + if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN: + logger.warning( + "Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN] + ) + keys_to_remove.append(k) + for k in keys_to_remove: + del og[k] + + logger.debug("Calculated OG for %s as %s", url, og) + + jsonog = json_encoder.encode(og) + + # Cap the amount of time to consider a response valid. + expiration_ms = min(expiration_ms, ONE_DAY) + + # store OG in history-aware DB cache + await self.store.store_url_cache( + url, + media_info.response_code, + media_info.etag, + media_info.created_ts_ms + expiration_ms, + jsonog, + media_info.filesystem_id, + media_info.created_ts_ms, + ) + + return jsonog.encode("utf8") + + async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult: + """ + Fetches a remote URL and parses the headers. + + Args: + url: The URL to fetch. + output_stream: The stream to write the content to. + + Returns: + A tuple of: + Media length, URL downloaded, the HTTP response code, + the media type, the downloaded file name, the number of + milliseconds the result is valid for, the etag header. + """ + + try: + logger.debug("Trying to get preview for url '%s'", url) + length, headers, uri, code = await self.client.get_file( + url, + output_stream=output_stream, + max_size=self.max_spider_size, + headers={ + b"Accept-Language": self.url_preview_accept_language, + # Use a custom user agent for the preview because some sites will only return + # Open Graph metadata to crawler user agents. Omit the Synapse version + # string to avoid leaking information. + b"User-Agent": [ + "Synapse (bot; +https://github.com/matrix-org/synapse)" + ], + }, + is_allowed_content_type=_is_previewable, + ) + except SynapseError: + # Pass SynapseErrors through directly, so that the servlet + # handler will return a SynapseError to the client instead of + # blank data or a 500. + raise + except DNSLookupError: + # DNS lookup returned no results + # Note: This will also be the case if one of the resolved IP + # addresses is blacklisted + raise SynapseError( + 502, + "DNS resolution failure during URL preview generation", + Codes.UNKNOWN, + ) + except Exception as e: + # FIXME: pass through 404s and other error messages nicely + logger.warning("Error downloading %s: %r", url, e) + + raise SynapseError( + 500, + "Failed to download content: %s" + % (traceback.format_exception_only(sys.exc_info()[0], e),), + Codes.UNKNOWN, + ) + + if b"Content-Type" in headers: + media_type = headers[b"Content-Type"][0].decode("ascii") + else: + media_type = "application/octet-stream" + + download_name = get_filename_from_headers(headers) + + # FIXME: we should calculate a proper expiration based on the + # Cache-Control and Expire headers. But for now, assume 1 hour. 
+ expires = ONE_HOUR + etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None + + return DownloadResult( + length, uri, code, media_type, download_name, expires, etag + ) + + async def _parse_data_url( + self, url: str, output_stream: BinaryIO + ) -> DownloadResult: + """ + Parses a data: URL. + + Args: + url: The URL to parse. + output_stream: The stream to write the content to. + + Returns: + A tuple of: + Media length, URL downloaded, the HTTP response code, + the media type, the downloaded file name, the number of + milliseconds the result is valid for, the etag header. + """ + + try: + logger.debug("Trying to parse data url '%s'", url) + with urlopen(url) as url_info: + # TODO Can this be more efficient. + output_stream.write(url_info.read()) + except Exception as e: + logger.warning("Error parsing data: URL %s: %r", url, e) + + raise SynapseError( + 500, + "Failed to parse data URL: %s" + % (traceback.format_exception_only(sys.exc_info()[0], e),), + Codes.UNKNOWN, + ) + + return DownloadResult( + # Read back the length that has been written. + length=output_stream.tell(), + uri=url, + # If it was parsed, consider this a 200 OK. + response_code=200, + # urlopen shoves the media-type from the data URL into the content type + # header object. + media_type=url_info.headers.get_content_type(), + # Some features are not supported by data: URLs. + download_name=None, + expires=ONE_HOUR, + etag=None, + ) + + async def _handle_url( + self, url: str, user: UserID, allow_data_urls: bool = False + ) -> MediaInfo: + """ + Fetches content from a URL and parses the result to generate a MediaInfo. + + It uses the media storage provider to persist the fetched content and + stores the mapping into the database. + + Args: + url: The URL to fetch. + user: The user who ahs requested this URL. + allow_data_urls: True if data URLs should be allowed. + + Returns: + A MediaInfo object describing the fetched content. + """ + + # TODO: we should probably honour robots.txt... except in practice + # we're most likely being explicitly triggered by a human rather than a + # bot, so are we really a robot? + + file_id = datetime.date.today().isoformat() + "_" + random_string(16) + + file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True) + + with self.media_storage.store_into_file(file_info) as (f, fname, finish): + if url.startswith("data:"): + if not allow_data_urls: + raise SynapseError( + 500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN + ) + + download_result = await self._parse_data_url(url, f) + else: + download_result = await self._download_url(url, f) + + await finish() + + try: + time_now_ms = self.clock.time_msec() + + await self.store.store_local_media( + media_id=file_id, + media_type=download_result.media_type, + time_now_ms=time_now_ms, + upload_name=download_result.download_name, + media_length=download_result.length, + user_id=user, + url_cache=url, + ) + + except Exception as e: + logger.error("Error handling downloaded %s: %r", url, e) + # TODO: we really ought to delete the downloaded file in this + # case, since we won't have recorded it in the db, and will + # therefore not expire it. 
+ raise + + return MediaInfo( + media_type=download_result.media_type, + media_length=download_result.length, + download_name=download_result.download_name, + created_ts_ms=time_now_ms, + filesystem_id=file_id, + filename=fname, + uri=download_result.uri, + response_code=download_result.response_code, + expires=download_result.expires, + etag=download_result.etag, + ) + + async def _precache_image_url( + self, user: UserID, media_info: MediaInfo, og: JsonDict + ) -> None: + """ + Pre-cache the image (if one exists) for posterity + + Args: + user: The user requesting the preview. + media_info: The media being previewed. + og: The Open Graph dictionary. This is modified with image information. + """ + # If there's no image or it is blank, there's nothing to do. + if "og:image" not in og: + return + + # Remove the raw image URL, this will be replaced with an MXC URL, if successful. + image_url = og.pop("og:image") + if not image_url: + return + + # The image URL from the HTML might be relative to the previewed page, + # convert it to an URL which can be requested directly. + url_parts = urlparse(image_url) + if url_parts.scheme != "data": + image_url = urljoin(media_info.uri, image_url) + + # FIXME: it might be cleaner to use the same flow as the main /preview_url + # request itself and benefit from the same caching etc. But for now we + # just rely on the caching on the master request to speed things up. + try: + image_info = await self._handle_url(image_url, user, allow_data_urls=True) + except Exception as e: + # Pre-caching the image failed, don't block the entire URL preview. + logger.warning( + "Pre-caching image failed during URL preview: %s errored with %s", + image_url, + e, + ) + return + + if _is_media(image_info.media_type): + # TODO: make sure we don't choke on white-on-transparent images + file_id = image_info.filesystem_id + dims = await self.media_repo._generate_thumbnails( + None, file_id, file_id, image_info.media_type, url_cache=True + ) + if dims: + og["og:image:width"] = dims["width"] + og["og:image:height"] = dims["height"] + else: + logger.warning("Couldn't get dims for %s", image_url) + + og["og:image"] = f"mxc://{self.server_name}/{image_info.filesystem_id}" + og["og:image:type"] = image_info.media_type + og["matrix:image:size"] = image_info.media_length + + async def _handle_oembed_response( + self, url: str, media_info: MediaInfo, expiration_ms: int + ) -> Tuple[JsonDict, Optional[str], int]: + """ + Parse the downloaded oEmbed info. + + Args: + url: The URL which is being previewed (not the one which was + requested). + media_info: The media being previewed. + expiration_ms: The length of time, in milliseconds, the media is valid for. + + Returns: + A tuple of: + The Open Graph dictionary, if the oEmbed info can be parsed. + The author name if it could be retrieved from oEmbed. + The (possibly updated) length of time, in milliseconds, the media is valid for. + """ + # If JSON was not returned, there's nothing to do. + if not _is_json(media_info.media_type): + return {}, None, expiration_ms + + with open(media_info.filename, "rb") as file: + body = file.read() + + oembed_response = self._oembed.parse_oembed_response(url, body) + open_graph_result = oembed_response.open_graph_result + + # Use the cache age from the oEmbed result, if one was given. 
+ if open_graph_result and oembed_response.cache_age is not None: + expiration_ms = oembed_response.cache_age + + return open_graph_result, oembed_response.author_name, expiration_ms + + def _start_expire_url_cache_data(self) -> Deferred: + return run_as_background_process( + "expire_url_cache_data", self._expire_url_cache_data + ) + + async def _expire_url_cache_data(self) -> None: + """Clean up expired url cache content, media and thumbnails.""" + + assert self._worker_run_media_background_jobs + + now = self.clock.time_msec() + + logger.debug("Running url preview cache expiry") + + def try_remove_parent_dirs(dirs: Iterable[str]) -> None: + """Attempt to remove the given chain of parent directories + + Args: + dirs: The list of directory paths to delete, with children appearing + before their parents. + """ + for dir in dirs: + try: + os.rmdir(dir) + except FileNotFoundError: + # Already deleted, continue with deleting the rest + pass + except OSError as e: + # Failed, skip deleting the rest of the parent dirs + if e.errno != errno.ENOTEMPTY: + logger.warning( + "Failed to remove media directory while clearing url preview cache: %r: %s", + dir, + e, + ) + break + + # First we delete expired url cache entries + media_ids = await self.store.get_expired_url_cache(now) + + removed_media = [] + for media_id in media_ids: + fname = self.filepaths.url_cache_filepath(media_id) + try: + os.remove(fname) + except FileNotFoundError: + pass # If the path doesn't exist, meh + except OSError as e: + logger.warning( + "Failed to remove media while clearing url preview cache: %r: %s", + media_id, + e, + ) + continue + + removed_media.append(media_id) + + dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) + try_remove_parent_dirs(dirs) + + await self.store.delete_url_cache(removed_media) + + if removed_media: + logger.debug( + "Deleted %d entries from url preview cache", len(removed_media) + ) + else: + logger.debug("No entries removed from url preview cache") + + # Now we delete old images associated with the url cache. + # These may be cached for a bit on the client (i.e., they + # may have a room open with a preview url thing open). + # So we wait a couple of days before deleting, just in case. + expire_before = now - IMAGE_CACHE_EXPIRY_MS + media_ids = await self.store.get_url_cache_media_before(expire_before) + + removed_media = [] + for media_id in media_ids: + fname = self.filepaths.url_cache_filepath(media_id) + try: + os.remove(fname) + except FileNotFoundError: + pass # If the path doesn't exist, meh + except OSError as e: + logger.warning( + "Failed to remove media from url preview cache: %r: %s", media_id, e + ) + continue + + dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) + try_remove_parent_dirs(dirs) + + thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id) + try: + shutil.rmtree(thumbnail_dir) + except FileNotFoundError: + pass # If the path doesn't exist, meh + except OSError as e: + logger.warning( + "Failed to remove media from url preview cache: %r: %s", media_id, e + ) + continue + + removed_media.append(media_id) + + dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id) + # Note that one of the directories to be deleted has already been + # removed by the `rmtree` above. 
+ try_remove_parent_dirs(dirs) + + await self.store.delete_url_cache_media(removed_media) + + if removed_media: + logger.debug("Deleted %d media from url preview cache", len(removed_media)) + else: + logger.debug("No media removed from url preview cache") + + +def _is_media(content_type: str) -> bool: + return content_type.lower().startswith("image/") + + +def _is_html(content_type: str) -> bool: + content_type = content_type.lower() + return content_type.startswith("text/html") or content_type.startswith( + "application/xhtml" + ) + + +def _is_json(content_type: str) -> bool: + return content_type.lower().startswith("application/json") + + +def _is_previewable(content_type: str) -> bool: + """Returns True for content types for which we will perform URL preview and False + otherwise.""" + + return _is_html(content_type) or _is_media(content_type) or _is_json(content_type) diff --git a/synapse/rest/media/preview_url_resource.py b/synapse/rest/media/preview_url_resource.py index 7ada72875755..58513c4be43f 100644 --- a/synapse/rest/media/preview_url_resource.py +++ b/synapse/rest/media/preview_url_resource.py @@ -12,26 +12,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import datetime -import errno -import fnmatch -import logging -import os -import re -import shutil -import sys -import traceback -from typing import TYPE_CHECKING, BinaryIO, Iterable, Optional, Tuple -from urllib.parse import urljoin, urlparse, urlsplit -from urllib.request import urlopen -import attr +from typing import TYPE_CHECKING -from twisted.internet.defer import Deferred -from twisted.internet.error import DNSLookupError - -from synapse.api.errors import Codes, SynapseError -from synapse.http.client import SimpleHttpClient from synapse.http.server import ( DirectServeJsonResource, respond_with_json, @@ -39,71 +22,13 @@ ) from synapse.http.servlet import parse_integer, parse_string from synapse.http.site import SynapseRequest -from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.media._base import FileInfo, get_filename_from_headers from synapse.media.media_storage import MediaStorage -from synapse.media.oembed import OEmbedProvider -from synapse.media.preview_html import decode_body, parse_html_to_open_graph -from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.types import JsonDict, UserID -from synapse.util import json_encoder -from synapse.util.async_helpers import ObservableDeferred -from synapse.util.caches.expiringcache import ExpiringCache -from synapse.util.stringutils import random_string +from synapse.media.url_previewer import UrlPreviewer if TYPE_CHECKING: from synapse.media.media_repository import MediaRepository from synapse.server import HomeServer -logger = logging.getLogger(__name__) - -OG_TAG_NAME_MAXLEN = 50 -OG_TAG_VALUE_MAXLEN = 1000 - -ONE_HOUR = 60 * 60 * 1000 -ONE_DAY = 24 * ONE_HOUR -IMAGE_CACHE_EXPIRY_MS = 2 * ONE_DAY - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class DownloadResult: - length: int - uri: str - response_code: int - media_type: str - download_name: Optional[str] - expires: int - etag: Optional[str] - - -@attr.s(slots=True, frozen=True, auto_attribs=True) -class MediaInfo: - """ - Information parsed from downloading media being previewed. - """ - - # The Content-Type header of the response. 
- media_type: str - # The length (in bytes) of the downloaded media. - media_length: int - # The media filename, according to the server. This is parsed from the - # returned headers, if possible. - download_name: Optional[str] - # The time of the preview. - created_ts_ms: int - # Information from the media storage provider about where the file is stored - # on disk. - filesystem_id: str - filename: str - # The URI being previewed. - uri: str - # The HTTP response code. - response_code: int - # The timestamp (in milliseconds) of when this preview expires. - expires: int - # The ETag header of the response. - etag: Optional[str] - class PreviewUrlResource(DirectServeJsonResource): """ @@ -121,54 +46,6 @@ class PreviewUrlResource(DirectServeJsonResource): * The URL metadata must be stored somewhere, rather than just using Matrix itself to store the media. * Matrix cannot be used to distribute the metadata between homeservers. - - When Synapse is asked to preview a URL it does the following: - - 1. Checks against a URL blacklist (defined as `url_preview_url_blacklist` in the - config). - 2. Checks the URL against an in-memory cache and returns the result if it exists. (This - is also used to de-duplicate processing of multiple in-flight requests at once.) - 3. Kicks off a background process to generate a preview: - 1. Checks URL and timestamp against the database cache and returns the result if it - has not expired and was successful (a 2xx return code). - 2. Checks if the URL matches an oEmbed (https://oembed.com/) pattern. If it - does, update the URL to download. - 3. Downloads the URL and stores it into a file via the media storage provider - and saves the local media metadata. - 4. If the media is an image: - 1. Generates thumbnails. - 2. Generates an Open Graph response based on image properties. - 5. If the media is HTML: - 1. Decodes the HTML via the stored file. - 2. Generates an Open Graph response from the HTML. - 3. If a JSON oEmbed URL was found in the HTML via autodiscovery: - 1. Downloads the URL and stores it into a file via the media storage provider - and saves the local media metadata. - 2. Convert the oEmbed response to an Open Graph response. - 3. Override any Open Graph data from the HTML with data from oEmbed. - 4. If an image exists in the Open Graph response: - 1. Downloads the URL and stores it into a file via the media storage - provider and saves the local media metadata. - 2. Generates thumbnails. - 3. Updates the Open Graph response based on image properties. - 6. If the media is JSON and an oEmbed URL was found: - 1. Convert the oEmbed response to an Open Graph response. - 2. If a thumbnail or image is in the oEmbed response: - 1. Downloads the URL and stores it into a file via the media storage - provider and saves the local media metadata. - 2. Generates thumbnails. - 3. Updates the Open Graph response based on image properties. - 7. Stores the result in the database cache. - 4. Returns the result. - - If any additional requests (e.g. from oEmbed autodiscovery, step 5.3 or - image thumbnailing, step 5.4 or 6.4) fails then the URL preview as a whole - does not fail. As much information as possible is returned. - - The in-memory cache expires after 1 hour. - - Expired entries in the database cache (and their associated media files) are - deleted every 10 seconds. The default expiration time is 1 hour from download. 
""" isLeaf = True @@ -183,48 +60,10 @@ def __init__( self.auth = hs.get_auth() self.clock = hs.get_clock() - self.filepaths = media_repo.filepaths - self.max_spider_size = hs.config.media.max_spider_size - self.server_name = hs.hostname - self.store = hs.get_datastores().main - self.client = SimpleHttpClient( - hs, - treq_args={"browser_like_redirects": True}, - ip_whitelist=hs.config.media.url_preview_ip_range_whitelist, - ip_blacklist=hs.config.media.url_preview_ip_range_blacklist, - use_proxy=True, - ) self.media_repo = media_repo - self.primary_base_path = media_repo.primary_base_path self.media_storage = media_storage - self._oembed = OEmbedProvider(hs) - - # We run the background jobs if we're the instance specified (or no - # instance is specified, where we assume there is only one instance - # serving media). - instance_running_jobs = hs.config.media.media_instance_running_background_jobs - self._worker_run_media_background_jobs = ( - instance_running_jobs is None - or instance_running_jobs == hs.get_instance_name() - ) - - self.url_preview_url_blacklist = hs.config.media.url_preview_url_blacklist - self.url_preview_accept_language = hs.config.media.url_preview_accept_language - - # memory cache mapping urls to an ObservableDeferred returning - # JSON-encoded OG metadata - self._cache: ExpiringCache[str, ObservableDeferred] = ExpiringCache( - cache_name="url_previews", - clock=self.clock, - # don't spider URLs more often than once an hour - expiry_ms=ONE_HOUR, - ) - - if self._worker_run_media_background_jobs: - self._cleaner_loop = self.clock.looping_call( - self._start_expire_url_cache_data, 10 * 1000 - ) + self._url_previewer = UrlPreviewer(hs, media_repo, media_storage) async def _async_render_OPTIONS(self, request: SynapseRequest) -> None: request.setHeader(b"Allow", b"OPTIONS, GET") @@ -238,632 +77,5 @@ async def _async_render_GET(self, request: SynapseRequest) -> None: if ts is None: ts = self.clock.time_msec() - # XXX: we could move this into _do_preview if we wanted. - url_tuple = urlsplit(url) - for entry in self.url_preview_url_blacklist: - match = True - for attrib in entry: - pattern = entry[attrib] - value = getattr(url_tuple, attrib) - logger.debug( - "Matching attrib '%s' with value '%s' against pattern '%s'", - attrib, - value, - pattern, - ) - - if value is None: - match = False - continue - - # Some attributes might not be parsed as strings by urlsplit (such as the - # port, which is parsed as an int). Because we use match functions that - # expect strings, we want to make sure that's what we give them. 
- value_str = str(value) - - if pattern.startswith("^"): - if not re.match(pattern, value_str): - match = False - continue - else: - if not fnmatch.fnmatch(value_str, pattern): - match = False - continue - if match: - logger.warning("URL %s blocked by url_blacklist entry %s", url, entry) - raise SynapseError( - 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN - ) - - # the in-memory cache: - # * ensures that only one request is active at a time - # * takes load off the DB for the thundering herds - # * also caches any failures (unlike the DB) so we don't keep - # requesting the same endpoint - - observable = self._cache.get(url) - - if not observable: - download = run_in_background(self._do_preview, url, requester.user, ts) - observable = ObservableDeferred(download, consumeErrors=True) - self._cache[url] = observable - else: - logger.info("Returning cached response") - - og = await make_deferred_yieldable(observable.observe()) + og = await self._url_previewer.preview(url, requester.user, ts) respond_with_json_bytes(request, 200, og, send_cors=True) - - async def _do_preview(self, url: str, user: UserID, ts: int) -> bytes: - """Check the db, and download the URL and build a preview - - Args: - url: The URL to preview. - user: The user requesting the preview. - ts: The timestamp requested for the preview. - - Returns: - json-encoded og data - """ - # check the URL cache in the DB (which will also provide us with - # historical previews, if we have any) - cache_result = await self.store.get_url_cache(url, ts) - if ( - cache_result - and cache_result["expires_ts"] > ts - and cache_result["response_code"] / 100 == 2 - ): - # It may be stored as text in the database, not as bytes (such as - # PostgreSQL). If so, encode it back before handing it on. - og = cache_result["og"] - if isinstance(og, str): - og = og.encode("utf8") - return og - - # If this URL can be accessed via oEmbed, use that instead. - url_to_download = url - oembed_url = self._oembed.get_oembed_url(url) - if oembed_url: - url_to_download = oembed_url - - media_info = await self._handle_url(url_to_download, user) - - logger.debug("got media_info of '%s'", media_info) - - # The number of milliseconds that the response should be considered valid. - expiration_ms = media_info.expires - author_name: Optional[str] = None - - if _is_media(media_info.media_type): - file_id = media_info.filesystem_id - dims = await self.media_repo._generate_thumbnails( - None, file_id, file_id, media_info.media_type, url_cache=True - ) - - og = { - "og:description": media_info.download_name, - "og:image": f"mxc://{self.server_name}/{media_info.filesystem_id}", - "og:image:type": media_info.media_type, - "matrix:image:size": media_info.media_length, - } - - if dims: - og["og:image:width"] = dims["width"] - og["og:image:height"] = dims["height"] - else: - logger.warning("Couldn't get dims for %s" % url) - - # define our OG response for this media - elif _is_html(media_info.media_type): - # TODO: somehow stop a big HTML tree from exploding synapse's RAM - - with open(media_info.filename, "rb") as file: - body = file.read() - - tree = decode_body(body, media_info.uri, media_info.media_type) - if tree is not None: - # Check if this HTML document points to oEmbed information and - # defer to that. 
- oembed_url = self._oembed.autodiscover_from_html(tree) - og_from_oembed: JsonDict = {} - if oembed_url: - try: - oembed_info = await self._handle_url( - oembed_url, user, allow_data_urls=True - ) - except Exception as e: - # Fetching the oEmbed info failed, don't block the entire URL preview. - logger.warning( - "oEmbed fetch failed during URL preview: %s errored with %s", - oembed_url, - e, - ) - else: - ( - og_from_oembed, - author_name, - expiration_ms, - ) = await self._handle_oembed_response( - url, oembed_info, expiration_ms - ) - - # Parse Open Graph information from the HTML in case the oEmbed - # response failed or is incomplete. - og_from_html = parse_html_to_open_graph(tree) - - # Compile the Open Graph response by using the scraped - # information from the HTML and overlaying any information - # from the oEmbed response. - og = {**og_from_html, **og_from_oembed} - - await self._precache_image_url(user, media_info, og) - else: - og = {} - - elif oembed_url: - # Handle the oEmbed information. - og, author_name, expiration_ms = await self._handle_oembed_response( - url, media_info, expiration_ms - ) - await self._precache_image_url(user, media_info, og) - - else: - logger.warning("Failed to find any OG data in %s", url) - og = {} - - # If we don't have a title but we have author_name, copy it as - # title - if not og.get("og:title") and author_name: - og["og:title"] = author_name - - # filter out any stupidly long values - keys_to_remove = [] - for k, v in og.items(): - # values can be numeric as well as strings, hence the cast to str - if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN: - logger.warning( - "Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN] - ) - keys_to_remove.append(k) - for k in keys_to_remove: - del og[k] - - logger.debug("Calculated OG for %s as %s", url, og) - - jsonog = json_encoder.encode(og) - - # Cap the amount of time to consider a response valid. - expiration_ms = min(expiration_ms, ONE_DAY) - - # store OG in history-aware DB cache - await self.store.store_url_cache( - url, - media_info.response_code, - media_info.etag, - media_info.created_ts_ms + expiration_ms, - jsonog, - media_info.filesystem_id, - media_info.created_ts_ms, - ) - - return jsonog.encode("utf8") - - async def _download_url(self, url: str, output_stream: BinaryIO) -> DownloadResult: - """ - Fetches a remote URL and parses the headers. - - Args: - url: The URL to fetch. - output_stream: The stream to write the content to. - - Returns: - A tuple of: - Media length, URL downloaded, the HTTP response code, - the media type, the downloaded file name, the number of - milliseconds the result is valid for, the etag header. - """ - - try: - logger.debug("Trying to get preview for url '%s'", url) - length, headers, uri, code = await self.client.get_file( - url, - output_stream=output_stream, - max_size=self.max_spider_size, - headers={ - b"Accept-Language": self.url_preview_accept_language, - # Use a custom user agent for the preview because some sites will only return - # Open Graph metadata to crawler user agents. Omit the Synapse version - # string to avoid leaking information. - b"User-Agent": [ - "Synapse (bot; +https://github.com/matrix-org/synapse)" - ], - }, - is_allowed_content_type=_is_previewable, - ) - except SynapseError: - # Pass SynapseErrors through directly, so that the servlet - # handler will return a SynapseError to the client instead of - # blank data or a 500. 
- raise - except DNSLookupError: - # DNS lookup returned no results - # Note: This will also be the case if one of the resolved IP - # addresses is blacklisted - raise SynapseError( - 502, - "DNS resolution failure during URL preview generation", - Codes.UNKNOWN, - ) - except Exception as e: - # FIXME: pass through 404s and other error messages nicely - logger.warning("Error downloading %s: %r", url, e) - - raise SynapseError( - 500, - "Failed to download content: %s" - % (traceback.format_exception_only(sys.exc_info()[0], e),), - Codes.UNKNOWN, - ) - - if b"Content-Type" in headers: - media_type = headers[b"Content-Type"][0].decode("ascii") - else: - media_type = "application/octet-stream" - - download_name = get_filename_from_headers(headers) - - # FIXME: we should calculate a proper expiration based on the - # Cache-Control and Expire headers. But for now, assume 1 hour. - expires = ONE_HOUR - etag = headers[b"ETag"][0].decode("ascii") if b"ETag" in headers else None - - return DownloadResult( - length, uri, code, media_type, download_name, expires, etag - ) - - async def _parse_data_url( - self, url: str, output_stream: BinaryIO - ) -> DownloadResult: - """ - Parses a data: URL. - - Args: - url: The URL to parse. - output_stream: The stream to write the content to. - - Returns: - A tuple of: - Media length, URL downloaded, the HTTP response code, - the media type, the downloaded file name, the number of - milliseconds the result is valid for, the etag header. - """ - - try: - logger.debug("Trying to parse data url '%s'", url) - with urlopen(url) as url_info: - # TODO Can this be more efficient. - output_stream.write(url_info.read()) - except Exception as e: - logger.warning("Error parsing data: URL %s: %r", url, e) - - raise SynapseError( - 500, - "Failed to parse data URL: %s" - % (traceback.format_exception_only(sys.exc_info()[0], e),), - Codes.UNKNOWN, - ) - - return DownloadResult( - # Read back the length that has been written. - length=output_stream.tell(), - uri=url, - # If it was parsed, consider this a 200 OK. - response_code=200, - # urlopen shoves the media-type from the data URL into the content type - # header object. - media_type=url_info.headers.get_content_type(), - # Some features are not supported by data: URLs. - download_name=None, - expires=ONE_HOUR, - etag=None, - ) - - async def _handle_url( - self, url: str, user: UserID, allow_data_urls: bool = False - ) -> MediaInfo: - """ - Fetches content from a URL and parses the result to generate a MediaInfo. - - It uses the media storage provider to persist the fetched content and - stores the mapping into the database. - - Args: - url: The URL to fetch. - user: The user who ahs requested this URL. - allow_data_urls: True if data URLs should be allowed. - - Returns: - A MediaInfo object describing the fetched content. - """ - - # TODO: we should probably honour robots.txt... except in practice - # we're most likely being explicitly triggered by a human rather than a - # bot, so are we really a robot? 
- - file_id = datetime.date.today().isoformat() + "_" + random_string(16) - - file_info = FileInfo(server_name=None, file_id=file_id, url_cache=True) - - with self.media_storage.store_into_file(file_info) as (f, fname, finish): - if url.startswith("data:"): - if not allow_data_urls: - raise SynapseError( - 500, "Previewing of data: URLs is forbidden", Codes.UNKNOWN - ) - - download_result = await self._parse_data_url(url, f) - else: - download_result = await self._download_url(url, f) - - await finish() - - try: - time_now_ms = self.clock.time_msec() - - await self.store.store_local_media( - media_id=file_id, - media_type=download_result.media_type, - time_now_ms=time_now_ms, - upload_name=download_result.download_name, - media_length=download_result.length, - user_id=user, - url_cache=url, - ) - - except Exception as e: - logger.error("Error handling downloaded %s: %r", url, e) - # TODO: we really ought to delete the downloaded file in this - # case, since we won't have recorded it in the db, and will - # therefore not expire it. - raise - - return MediaInfo( - media_type=download_result.media_type, - media_length=download_result.length, - download_name=download_result.download_name, - created_ts_ms=time_now_ms, - filesystem_id=file_id, - filename=fname, - uri=download_result.uri, - response_code=download_result.response_code, - expires=download_result.expires, - etag=download_result.etag, - ) - - async def _precache_image_url( - self, user: UserID, media_info: MediaInfo, og: JsonDict - ) -> None: - """ - Pre-cache the image (if one exists) for posterity - - Args: - user: The user requesting the preview. - media_info: The media being previewed. - og: The Open Graph dictionary. This is modified with image information. - """ - # If there's no image or it is blank, there's nothing to do. - if "og:image" not in og: - return - - # Remove the raw image URL, this will be replaced with an MXC URL, if successful. - image_url = og.pop("og:image") - if not image_url: - return - - # The image URL from the HTML might be relative to the previewed page, - # convert it to an URL which can be requested directly. - url_parts = urlparse(image_url) - if url_parts.scheme != "data": - image_url = urljoin(media_info.uri, image_url) - - # FIXME: it might be cleaner to use the same flow as the main /preview_url - # request itself and benefit from the same caching etc. But for now we - # just rely on the caching on the master request to speed things up. - try: - image_info = await self._handle_url(image_url, user, allow_data_urls=True) - except Exception as e: - # Pre-caching the image failed, don't block the entire URL preview. - logger.warning( - "Pre-caching image failed during URL preview: %s errored with %s", - image_url, - e, - ) - return - - if _is_media(image_info.media_type): - # TODO: make sure we don't choke on white-on-transparent images - file_id = image_info.filesystem_id - dims = await self.media_repo._generate_thumbnails( - None, file_id, file_id, image_info.media_type, url_cache=True - ) - if dims: - og["og:image:width"] = dims["width"] - og["og:image:height"] = dims["height"] - else: - logger.warning("Couldn't get dims for %s", image_url) - - og["og:image"] = f"mxc://{self.server_name}/{image_info.filesystem_id}" - og["og:image:type"] = image_info.media_type - og["matrix:image:size"] = image_info.media_length - - async def _handle_oembed_response( - self, url: str, media_info: MediaInfo, expiration_ms: int - ) -> Tuple[JsonDict, Optional[str], int]: - """ - Parse the downloaded oEmbed info. 
- - Args: - url: The URL which is being previewed (not the one which was - requested). - media_info: The media being previewed. - expiration_ms: The length of time, in milliseconds, the media is valid for. - - Returns: - A tuple of: - The Open Graph dictionary, if the oEmbed info can be parsed. - The author name if it could be retrieved from oEmbed. - The (possibly updated) length of time, in milliseconds, the media is valid for. - """ - # If JSON was not returned, there's nothing to do. - if not _is_json(media_info.media_type): - return {}, None, expiration_ms - - with open(media_info.filename, "rb") as file: - body = file.read() - - oembed_response = self._oembed.parse_oembed_response(url, body) - open_graph_result = oembed_response.open_graph_result - - # Use the cache age from the oEmbed result, if one was given. - if open_graph_result and oembed_response.cache_age is not None: - expiration_ms = oembed_response.cache_age - - return open_graph_result, oembed_response.author_name, expiration_ms - - def _start_expire_url_cache_data(self) -> Deferred: - return run_as_background_process( - "expire_url_cache_data", self._expire_url_cache_data - ) - - async def _expire_url_cache_data(self) -> None: - """Clean up expired url cache content, media and thumbnails.""" - - assert self._worker_run_media_background_jobs - - now = self.clock.time_msec() - - logger.debug("Running url preview cache expiry") - - def try_remove_parent_dirs(dirs: Iterable[str]) -> None: - """Attempt to remove the given chain of parent directories - - Args: - dirs: The list of directory paths to delete, with children appearing - before their parents. - """ - for dir in dirs: - try: - os.rmdir(dir) - except FileNotFoundError: - # Already deleted, continue with deleting the rest - pass - except OSError as e: - # Failed, skip deleting the rest of the parent dirs - if e.errno != errno.ENOTEMPTY: - logger.warning( - "Failed to remove media directory while clearing url preview cache: %r: %s", - dir, - e, - ) - break - - # First we delete expired url cache entries - media_ids = await self.store.get_expired_url_cache(now) - - removed_media = [] - for media_id in media_ids: - fname = self.filepaths.url_cache_filepath(media_id) - try: - os.remove(fname) - except FileNotFoundError: - pass # If the path doesn't exist, meh - except OSError as e: - logger.warning( - "Failed to remove media while clearing url preview cache: %r: %s", - media_id, - e, - ) - continue - - removed_media.append(media_id) - - dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) - try_remove_parent_dirs(dirs) - - await self.store.delete_url_cache(removed_media) - - if removed_media: - logger.debug( - "Deleted %d entries from url preview cache", len(removed_media) - ) - else: - logger.debug("No entries removed from url preview cache") - - # Now we delete old images associated with the url cache. - # These may be cached for a bit on the client (i.e., they - # may have a room open with a preview url thing open). - # So we wait a couple of days before deleting, just in case. 
- expire_before = now - IMAGE_CACHE_EXPIRY_MS - media_ids = await self.store.get_url_cache_media_before(expire_before) - - removed_media = [] - for media_id in media_ids: - fname = self.filepaths.url_cache_filepath(media_id) - try: - os.remove(fname) - except FileNotFoundError: - pass # If the path doesn't exist, meh - except OSError as e: - logger.warning( - "Failed to remove media from url preview cache: %r: %s", media_id, e - ) - continue - - dirs = self.filepaths.url_cache_filepath_dirs_to_delete(media_id) - try_remove_parent_dirs(dirs) - - thumbnail_dir = self.filepaths.url_cache_thumbnail_directory(media_id) - try: - shutil.rmtree(thumbnail_dir) - except FileNotFoundError: - pass # If the path doesn't exist, meh - except OSError as e: - logger.warning( - "Failed to remove media from url preview cache: %r: %s", media_id, e - ) - continue - - removed_media.append(media_id) - - dirs = self.filepaths.url_cache_thumbnail_dirs_to_delete(media_id) - # Note that one of the directories to be deleted has already been - # removed by the `rmtree` above. - try_remove_parent_dirs(dirs) - - await self.store.delete_url_cache_media(removed_media) - - if removed_media: - logger.debug("Deleted %d media from url preview cache", len(removed_media)) - else: - logger.debug("No media removed from url preview cache") - - -def _is_media(content_type: str) -> bool: - return content_type.lower().startswith("image/") - - -def _is_html(content_type: str) -> bool: - content_type = content_type.lower() - return content_type.startswith("text/html") or content_type.startswith( - "application/xhtml" - ) - - -def _is_json(content_type: str) -> bool: - return content_type.lower().startswith("application/json") - - -def _is_previewable(content_type: str) -> bool: - """Returns True for content types for which we will perform URL preview and False - otherwise.""" - - return _is_html(content_type) or _is_media(content_type) or _is_json(content_type) diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index e91dc581c204..e44beae8c13f 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -26,8 +26,8 @@ from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactor from synapse.config.oembed import OEmbedEndpointConfig +from synapse.media.url_previewer import IMAGE_CACHE_EXPIRY_MS from synapse.rest.media.media_repository_resource import MediaRepositoryResource -from synapse.rest.media.preview_url_resource import IMAGE_CACHE_EXPIRY_MS from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock @@ -36,7 +36,6 @@ from tests import unittest from tests.server import FakeTransport from tests.test_utils import SMALL_PNG -from tests.utils import MockClock try: import lxml @@ -117,8 +116,9 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: return hs def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: - self.media_repo = hs.get_media_repository_resource() - self.preview_url = self.media_repo.children[b"preview_url"] + self.media_repo = hs.get_media_repository() + media_repo_resource = hs.get_media_repository_resource() + self.preview_url = media_repo_resource.children[b"preview_url"] self.lookups: Dict[str, Any] = {} @@ -193,9 +193,9 @@ def test_cache_returns_correct_type(self) -> None: ) # Clear the in-memory cache - self.assertIn("http://matrix.org", self.preview_url._cache) - self.preview_url._cache.pop("http://matrix.org") - 
self.assertNotIn("http://matrix.org", self.preview_url._cache) + self.assertIn("http://matrix.org", self.preview_url._url_previewer._cache) + self.preview_url._url_previewer._cache.pop("http://matrix.org") + self.assertNotIn("http://matrix.org", self.preview_url._url_previewer._cache) # Check the database cache returns the correct response channel = self.make_request( @@ -1073,7 +1073,7 @@ def test_storage_providers_exclude_files(self) -> None: """Test that files are not stored in or fetched from storage providers.""" host, media_id = self._download_image() - rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id) + rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id) media_store_path = os.path.join(self.media_store_path, rel_file_path) storage_provider_path = os.path.join(self.storage_path, rel_file_path) @@ -1116,7 +1116,7 @@ def test_storage_providers_exclude_thumbnails(self) -> None: host, media_id = self._download_image() rel_thumbnail_path = ( - self.preview_url.filepaths.url_cache_thumbnail_directory_rel(media_id) + self.media_repo.filepaths.url_cache_thumbnail_directory_rel(media_id) ) media_store_thumbnail_path = os.path.join( self.media_store_path, rel_thumbnail_path @@ -1143,7 +1143,7 @@ def test_storage_providers_exclude_thumbnails(self) -> None: self.assertEqual(channel.code, 200) # Remove the original, otherwise thumbnails will regenerate - rel_file_path = self.preview_url.filepaths.url_cache_filepath_rel(media_id) + rel_file_path = self.media_repo.filepaths.url_cache_filepath_rel(media_id) media_store_path = os.path.join(self.media_store_path, rel_file_path) os.remove(media_store_path) @@ -1166,26 +1166,24 @@ def test_storage_providers_exclude_thumbnails(self) -> None: def test_cache_expiry(self) -> None: """Test that URL cache files and thumbnails are cleaned up properly on expiry.""" - self.preview_url.clock = MockClock() - _host, media_id = self._download_image() - file_path = self.preview_url.filepaths.url_cache_filepath(media_id) - file_dirs = self.preview_url.filepaths.url_cache_filepath_dirs_to_delete( + file_path = self.media_repo.filepaths.url_cache_filepath(media_id) + file_dirs = self.media_repo.filepaths.url_cache_filepath_dirs_to_delete( media_id ) - thumbnail_dir = self.preview_url.filepaths.url_cache_thumbnail_directory( + thumbnail_dir = self.media_repo.filepaths.url_cache_thumbnail_directory( media_id ) - thumbnail_dirs = self.preview_url.filepaths.url_cache_thumbnail_dirs_to_delete( + thumbnail_dirs = self.media_repo.filepaths.url_cache_thumbnail_dirs_to_delete( media_id ) self.assertTrue(os.path.isfile(file_path)) self.assertTrue(os.path.isdir(thumbnail_dir)) - self.preview_url.clock.advance_time_msec(IMAGE_CACHE_EXPIRY_MS + 1) - self.get_success(self.preview_url._expire_url_cache_data()) + self.reactor.advance(IMAGE_CACHE_EXPIRY_MS * 1000 + 1) + self.get_success(self.preview_url._url_previewer._expire_url_cache_data()) for path in [file_path] + file_dirs + [thumbnail_dir] + thumbnail_dirs: self.assertFalse( From 827f198177c4cf547b9d2d1eed41411e945fc199 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 21 Mar 2023 09:13:43 +0000 Subject: [PATCH 335/344] Fix error when sending message into deleted room. (#15235) When a room is deleted in Synapse we remove the event forward extremities in the room, so if (say a bot) tries to send a message into the room we error out due to not being able to calculate prev events for the new event *before* we check if the sender is in the room. 
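To make that ordering concrete, here is a small self-contained sketch (an illustrative aside, not part of the patch): apart from `check_local_user_in_room`, which mirrors the store method used in the diff below, every name and the in-memory store are made up for the example.

```python
import asyncio
from typing import Dict, List, Optional, Sequence, Set, Tuple


class NotInRoomError(Exception):
    """A clean, client-facing failure instead of an opaque internal error."""


class InMemoryStore:
    """Toy stand-in for the datastore: a purged room has no forward extremities."""

    def __init__(self) -> None:
        self.memberships: Set[Tuple[str, str]] = set()
        self.extremities: Dict[str, List[str]] = {}

    async def check_local_user_in_room(self, user_id: str, room_id: str) -> bool:
        return (user_id, room_id) in self.memberships

    async def get_forward_extremities(self, room_id: str) -> List[str]:
        extremities = self.extremities.get(room_id)
        if not extremities:
            # This is the failure described above: prev events cannot be
            # derived once the room's forward extremities have been deleted.
            raise RuntimeError("no forward extremities for %s" % room_id)
        return extremities


async def resolve_prev_events(
    store: InMemoryStore,
    sender: str,
    room_id: str,
    prev_event_ids: Optional[Sequence[str]] = None,
) -> Sequence[str]:
    if not prev_event_ids:
        # Check membership *before* deriving prev events, so a sender outside a
        # deleted room is rejected cleanly rather than hitting the error above.
        if not await store.check_local_user_in_room(sender, room_id):
            raise NotInRoomError(f"User {sender} not in room {room_id}")
        prev_event_ids = await store.get_forward_extremities(room_id)
    return prev_event_ids


if __name__ == "__main__":
    try:
        asyncio.run(
            resolve_prev_events(InMemoryStore(), "@bot:example.org", "!room:example.org")
        )
    except NotInRoomError as err:
        print("Rejected cleanly:", err)
```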
Fixes #8094 --- changelog.d/15235.bugfix | 1 + synapse/handlers/message.py | 17 +++++++++++++++-- tests/rest/admin/test_room.py | 15 +++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15235.bugfix diff --git a/changelog.d/15235.bugfix b/changelog.d/15235.bugfix new file mode 100644 index 000000000000..e6a6bb1b9d08 --- /dev/null +++ b/changelog.d/15235.bugfix @@ -0,0 +1 @@ +Fix long-standing error when sending message into deleted room. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index da129ec16a4a..4c75433a635f 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -987,10 +987,11 @@ async def create_and_send_nonmember_event( # a situation where event persistence can't keep up, causing # extremities to pile up, which in turn leads to state resolution # taking longer. - async with self.limiter.queue(event_dict["room_id"]): + room_id = event_dict["room_id"] + async with self.limiter.queue(room_id): if txn_id: event = await self.get_event_from_transaction( - requester, txn_id, event_dict["room_id"] + requester, txn_id, room_id ) if event: # we know it was persisted, so must have a stream ordering @@ -1000,6 +1001,18 @@ async def create_and_send_nonmember_event( event.internal_metadata.stream_ordering, ) + # If we don't have any prev event IDs specified then we need to + # check that the host is in the room (as otherwise populating the + # prev events will fail), at which point we may as well check the + # local user is in the room. + if not prev_event_ids: + user_id = requester.user.to_string() + is_user_in_room = await self.store.check_local_user_in_room( + user_id, room_id + ) + if not is_user_in_room: + raise AuthError(403, f"User {user_id} not in room {room_id}") + # Try several times, it could fail with PartialStateConflictError # in handle_new_client_event, cf comment in except block. max_retries = 5 diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 9dbb778679d1..eb50086c508e 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -402,6 +402,21 @@ def test_shutdown_room_block_peek(self) -> None: # Assert we can no longer peek into the room self._assert_peek(self.room_id, expect_code=403) + def test_room_delete_send(self) -> None: + """Test that sending into a deleted room returns a 403""" + channel = self.make_request( + "DELETE", + self.url, + content={}, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + + self.helper.send( + self.room_id, "test message", expect_code=403, tok=self.other_user_tok + ) + def _is_blocked(self, room_id: str, expect: bool = True) -> None: """Assert that the room is blocked or not""" d = self.store.is_room_blocked(room_id) From f11fe931f5925765b4e9e4e4c9d0b79e548ebfb4 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Tue, 21 Mar 2023 11:51:03 +0000 Subject: [PATCH 336/344] Document that our Docker images are mirrored to GHCR. (#15282) Signed-off-by: Olivier Wilkinson (reivilibre) --- changelog.d/15282.docker | 1 + docs/setup/installation.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/15282.docker diff --git a/changelog.d/15282.docker b/changelog.d/15282.docker new file mode 100644 index 000000000000..6990430ef44d --- /dev/null +++ b/changelog.d/15282.docker @@ -0,0 +1 @@ +Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). 
\ No newline at end of file diff --git a/docs/setup/installation.md b/docs/setup/installation.md index d123e339edc9..86e506a3e231 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -26,8 +26,8 @@ for most users. #### Docker images and Ansible playbooks There is an official synapse image available at - which can be used with -the docker-compose file available at + or at [`ghcr.io/matrix-org/synapse`](https://ghcr.io/matrix-org/synapse) +which can be used with the docker-compose file available at [contrib/docker](https://github.com/matrix-org/synapse/tree/develop/contrib/docker). Further information on this including configuration options is available in the README on hub.docker.com. From b6aef593347924d39b4ff8b07e375eb656001545 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 21 Mar 2023 13:23:47 +0000 Subject: [PATCH 337/344] Make `EventHandler.get_event` return `None` when the requested event is not found (#15298) --- changelog.d/15298.bugfix | 1 + synapse/handlers/events.py | 9 +++++---- tests/rest/client/test_report_event.py | 5 +++++ 3 files changed, 11 insertions(+), 4 deletions(-) create mode 100644 changelog.d/15298.bugfix diff --git a/changelog.d/15298.bugfix b/changelog.d/15298.bugfix new file mode 100644 index 000000000000..8f29b08444fb --- /dev/null +++ b/changelog.d/15298.bugfix @@ -0,0 +1 @@ +Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). \ No newline at end of file diff --git a/synapse/handlers/events.py b/synapse/handlers/events.py index 68c07f0265c9..33359f6ed748 100644 --- a/synapse/handlers/events.py +++ b/synapse/handlers/events.py @@ -159,15 +159,16 @@ async def get_event( Returns: An event, or None if there is no event matching this ID. Raises: - SynapseError if there was a problem retrieving this event, or - AuthError if the user does not have the rights to inspect this - event. + AuthError: if the user does not have the rights to inspect this event. 
""" redact_behaviour = ( EventRedactBehaviour.as_is if show_redacted else EventRedactBehaviour.redact ) event = await self.store.get_event( - event_id, check_room_id=room_id, redact_behaviour=redact_behaviour + event_id, + check_room_id=room_id, + redact_behaviour=redact_behaviour, + allow_none=True, ) if not event: diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index 1250685d39ce..1a8ab067a9d7 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -84,6 +84,11 @@ def test_cannot_report_nonexistent_event(self) -> None: access_token=self.other_user_tok, ) self.assertEqual(404, channel.code, msg=channel.result["body"]) + self.assertEqual( + "Unable to report event: it does not exist or you aren't able to see it.", + channel.json_body["error"], + msg=channel.result["body"], + ) def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( From ec9224bf9a7bebb6c429ef45e0d1a293f0986836 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 21 Mar 2023 13:24:03 +0000 Subject: [PATCH 338/344] Make `POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}` endpoint return 404 if event exists, but the user lacks access (#15300) --- changelog.d/15300.bugfix | 1 + docs/upgrade.md | 12 ++++++ synapse/rest/client/report_event.py | 16 +++++--- .../storage/databases/main/events_worker.py | 1 - tests/rest/client/test_report_event.py | 37 +++++++++++++++++++ 5 files changed, 61 insertions(+), 6 deletions(-) create mode 100644 changelog.d/15300.bugfix diff --git a/changelog.d/15300.bugfix b/changelog.d/15300.bugfix new file mode 100644 index 000000000000..8f29b08444fb --- /dev/null +++ b/changelog.d/15300.bugfix @@ -0,0 +1 @@ +Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index f06e87405424..f14444a400ea 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,18 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.80.0 + +## Reporting events error code change + +Before this update, the +[`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) +endpoint would return a `403` if a user attempted to report an event that they did not have access to. +This endpoint will now return a `404` in this case instead. + +Clients that implement event reporting should check that their error handling code will handle this +change. 
+ # Upgrading to v1.79.0 ## The `on_threepid_bind` module callback method has been deprecated diff --git a/synapse/rest/client/report_event.py b/synapse/rest/client/report_event.py index 9be58602210b..ac1a63ca2745 100644 --- a/synapse/rest/client/report_event.py +++ b/synapse/rest/client/report_event.py @@ -16,7 +16,7 @@ from http import HTTPStatus from typing import TYPE_CHECKING, Tuple -from synapse.api.errors import Codes, NotFoundError, SynapseError +from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.http.server import HttpServer from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.http.site import SynapseRequest @@ -62,12 +62,18 @@ async def on_POST( Codes.BAD_JSON, ) - event = await self._event_handler.get_event( - requester.user, room_id, event_id, show_redacted=False - ) + try: + event = await self._event_handler.get_event( + requester.user, room_id, event_id, show_redacted=False + ) + except AuthError: + # The event exists, but this user is not allowed to access this event. + event = None + if event is None: raise NotFoundError( - "Unable to report event: it does not exist or you aren't able to see it." + "Unable to report event: " + "it does not exist or you aren't able to see it." ) await self.store.add_event_report( diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 20b7a683622c..0cf46626d2ff 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -805,7 +805,6 @@ async def get_missing_events_from_cache_or_db() -> Dict[ # the events have been redacted, and if so pulling the redaction event # out of the database to check it. # - missing_events = {} try: # Try to fetch from any external cache. We already checked the # in-memory cache above. diff --git a/tests/rest/client/test_report_event.py b/tests/rest/client/test_report_event.py index 1a8ab067a9d7..b88f1d61a081 100644 --- a/tests/rest/client/test_report_event.py +++ b/tests/rest/client/test_report_event.py @@ -90,6 +90,43 @@ def test_cannot_report_nonexistent_event(self) -> None: msg=channel.result["body"], ) + def test_cannot_report_event_if_not_in_room(self) -> None: + """ + Tests that we don't accept event reports for events that exist, but for which + the reporter should not be able to view (because they are not in the room). + """ + # Have the admin user create a room (the "other" user will not join this room). + new_room_id = self.helper.create_room_as(tok=self.admin_user_tok) + + # Have the admin user send an event in this room. + response = self.helper.send_event( + new_room_id, + "m.room.message", + content={ + "msgtype": "m.text", + "body": "This event has some bad words in it! Flip!", + }, + tok=self.admin_user_tok, + ) + event_id = response["event_id"] + + # Have the "other" user attempt to report it. Perhaps they found the event ID + # in a screenshot or something... + channel = self.make_request( + "POST", + f"rooms/{new_room_id}/report/{event_id}", + {"reason": "I'm not in this room but I have opinions anyways!"}, + access_token=self.other_user_tok, + ) + + # The "other" user is not in the room, so their report should be rejected. 
+ self.assertEqual(404, channel.code, msg=channel.result["body"]) + self.assertEqual( + "Unable to report event: it does not exist or you aren't able to see it.", + channel.json_body["error"], + msg=channel.result["body"], + ) + def _assert_status(self, response_status: int, data: JsonDict) -> None: channel = self.make_request( "POST", self.report_path, data, access_token=self.other_user_tok From 96bcc5d9028e745df2f708c92b15e4e5bfc91328 Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 21 Mar 2023 10:49:25 -0700 Subject: [PATCH 339/344] Revert "check sqlite database file exists before porting/#14692" (#15301) --- changelog.d/15301.bugfix | 3 +++ synapse/_scripts/synapse_port_db.py | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 changelog.d/15301.bugfix diff --git a/changelog.d/15301.bugfix b/changelog.d/15301.bugfix new file mode 100644 index 000000000000..c8f3628d3493 --- /dev/null +++ b/changelog.d/15301.bugfix @@ -0,0 +1,3 @@ +Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite) +would fail to open the SQLite database. + diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 2c9cbf8b275b..78d76d38ad90 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -1329,7 +1329,7 @@ def main() -> None: sqlite_config = { "name": "sqlite3", "args": { - "database": "file:{}?mode=rw".format(args.sqlite_database), + "database": args.sqlite_database, "cp_min": 1, "cp_max": 1, "check_same_thread": False, From 72832a615800828b81c3f4c2ce83bc91d950d527 Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Tue, 21 Mar 2023 10:56:21 -0700 Subject: [PATCH 340/344] 1.80.0rc1 --- CHANGES.md | 66 +++++++++++++++++++++++++++++++++++++++ changelog.d/14755.bugfix | 1 - changelog.d/14756.bugfix | 1 - changelog.d/14921.misc | 1 - changelog.d/14974.misc | 1 - changelog.d/15187.feature | 1 - changelog.d/15190.bugfix | 1 - changelog.d/15195.misc | 1 - changelog.d/15200.misc | 1 - changelog.d/15222.misc | 1 - changelog.d/15223.doc | 1 - changelog.d/15229.misc | 1 - changelog.d/15230.misc | 1 - changelog.d/15231.misc | 1 - changelog.d/15232.bugfix | 1 - changelog.d/15235.bugfix | 1 - changelog.d/15237.misc | 1 - changelog.d/15238.misc | 1 - changelog.d/15239.docker | 1 - changelog.d/15244.misc | 1 - changelog.d/15247.misc | 1 - changelog.d/15249.feature | 1 - changelog.d/15252.misc | 1 - changelog.d/15253.misc | 1 - changelog.d/15254.misc | 1 - changelog.d/15255.misc | 1 - changelog.d/15256.misc | 1 - changelog.d/15257.misc | 1 - changelog.d/15262.misc | 1 - changelog.d/15266.misc | 1 - changelog.d/15268.feature | 1 - changelog.d/15269.misc | 1 - changelog.d/15272.misc | 1 - changelog.d/15274.misc | 1 - changelog.d/15275.misc | 1 - changelog.d/15281.docker | 1 - changelog.d/15282.docker | 1 - changelog.d/15286.misc | 1 - changelog.d/15287.misc | 1 - changelog.d/15288.misc | 1 - changelog.d/15289.misc | 1 - changelog.d/15290.misc | 1 - changelog.d/15291.misc | 1 - changelog.d/15293.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- 46 files changed, 73 insertions(+), 44 deletions(-) delete mode 100644 changelog.d/14755.bugfix delete mode 100644 changelog.d/14756.bugfix delete mode 100644 changelog.d/14921.misc delete mode 100644 changelog.d/14974.misc delete mode 100644 changelog.d/15187.feature delete mode 100644 changelog.d/15190.bugfix delete mode 100644 changelog.d/15195.misc delete mode 100644 changelog.d/15200.misc 
delete mode 100644 changelog.d/15222.misc delete mode 100644 changelog.d/15223.doc delete mode 100644 changelog.d/15229.misc delete mode 100644 changelog.d/15230.misc delete mode 100644 changelog.d/15231.misc delete mode 100644 changelog.d/15232.bugfix delete mode 100644 changelog.d/15235.bugfix delete mode 100644 changelog.d/15237.misc delete mode 100644 changelog.d/15238.misc delete mode 100644 changelog.d/15239.docker delete mode 100644 changelog.d/15244.misc delete mode 100644 changelog.d/15247.misc delete mode 100644 changelog.d/15249.feature delete mode 100644 changelog.d/15252.misc delete mode 100644 changelog.d/15253.misc delete mode 100644 changelog.d/15254.misc delete mode 100644 changelog.d/15255.misc delete mode 100644 changelog.d/15256.misc delete mode 100644 changelog.d/15257.misc delete mode 100644 changelog.d/15262.misc delete mode 100644 changelog.d/15266.misc delete mode 100644 changelog.d/15268.feature delete mode 100644 changelog.d/15269.misc delete mode 100644 changelog.d/15272.misc delete mode 100644 changelog.d/15274.misc delete mode 100644 changelog.d/15275.misc delete mode 100644 changelog.d/15281.docker delete mode 100644 changelog.d/15282.docker delete mode 100644 changelog.d/15286.misc delete mode 100644 changelog.d/15287.misc delete mode 100644 changelog.d/15288.misc delete mode 100644 changelog.d/15289.misc delete mode 100644 changelog.d/15290.misc delete mode 100644 changelog.d/15291.misc delete mode 100644 changelog.d/15293.misc diff --git a/CHANGES.md b/CHANGES.md index 158ef035495e..5d0f66194292 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,69 @@ +Synapse 1.80.0rc1 (2023-03-21) +============================== + +Features +-------- + +- Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. ([\#15187](https://github.com/matrix-org/synapse/issues/15187)) +- Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249)) +- Allow loading `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268)) + + +Bugfixes +-------- + +- Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756)) +- Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190)) +- Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232)) +- Fix long-standing error when sending message into deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235)) + + +Updates to the Docker image +--------------------------- + +- Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. ([\#15239](https://github.com/matrix-org/synapse/issues/15239)) +- Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). 
([\#15281](https://github.com/matrix-org/synapse/issues/15281), [\#15282](https://github.com/matrix-org/synapse/issues/15282)) + + +Improved Documentation +---------------------- + +- Add a missing endpoint to the workers documentation. ([\#15223](https://github.com/matrix-org/synapse/issues/15223)) + + +Internal Changes +---------------- + +- Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921)) +- Add `Synapse-Trace-Id` to `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974)) +- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195)) +- Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200)) +- Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222)) +- Add topic and name events to group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229)) +- Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238)) +- Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237)) +- Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244)) +- Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247)) +- Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252)) +- Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253)) +- Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254)) +- Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255)) +- Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256)) +- Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257)) +- Skip processing of auto-join room behaviour if there are not auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262)) +- Remove unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266)) +- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269)) +- Clean-up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274)) +- Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275)) +- Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286)) +- Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287)) +- Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288)) +- Bump txredisapi from 1.4.7 to 1.4.9. 
([\#15289](https://github.com/matrix-org/synapse/issues/15289)) +- Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290)) +- Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291)) +- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293)) + + Synapse 1.79.0 (2023-03-14) =========================== diff --git a/changelog.d/14755.bugfix b/changelog.d/14755.bugfix deleted file mode 100644 index 12f979e9d041..000000000000 --- a/changelog.d/14755.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. \ No newline at end of file diff --git a/changelog.d/14756.bugfix b/changelog.d/14756.bugfix deleted file mode 100644 index 12f979e9d041..000000000000 --- a/changelog.d/14756.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. \ No newline at end of file diff --git a/changelog.d/14921.misc b/changelog.d/14921.misc deleted file mode 100644 index 599e23eb0c9f..000000000000 --- a/changelog.d/14921.misc +++ /dev/null @@ -1 +0,0 @@ -Add additional functionality to declaring worker types when starting Complement in worker mode. diff --git a/changelog.d/14974.misc b/changelog.d/14974.misc deleted file mode 100644 index 05c5f0144427..000000000000 --- a/changelog.d/14974.misc +++ /dev/null @@ -1 +0,0 @@ -Add `Synapse-Trace-Id` to `access-control-expose-headers` header. diff --git a/changelog.d/15187.feature b/changelog.d/15187.feature deleted file mode 100644 index f2b768925521..000000000000 --- a/changelog.d/15187.feature +++ /dev/null @@ -1 +0,0 @@ -Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. diff --git a/changelog.d/15190.bugfix b/changelog.d/15190.bugfix deleted file mode 100644 index 5c3a86320ec8..000000000000 --- a/changelog.d/15190.bugfix +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. diff --git a/changelog.d/15195.misc b/changelog.d/15195.misc deleted file mode 100644 index d8beea917d8b..000000000000 --- a/changelog.d/15195.misc +++ /dev/null @@ -1 +0,0 @@ -Improve performance of creating and authenticating events. \ No newline at end of file diff --git a/changelog.d/15200.misc b/changelog.d/15200.misc deleted file mode 100644 index dc6617222650..000000000000 --- a/changelog.d/15200.misc +++ /dev/null @@ -1 +0,0 @@ -Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key. diff --git a/changelog.d/15222.misc b/changelog.d/15222.misc deleted file mode 100644 index 6361676a1537..000000000000 --- a/changelog.d/15222.misc +++ /dev/null @@ -1 +0,0 @@ -Improve log lines when purging rooms. diff --git a/changelog.d/15223.doc b/changelog.d/15223.doc deleted file mode 100644 index 136b44df31a2..000000000000 --- a/changelog.d/15223.doc +++ /dev/null @@ -1 +0,0 @@ -Add a missing endpoint to the workers documentation. 
diff --git a/changelog.d/15229.misc b/changelog.d/15229.misc deleted file mode 100644 index 4d8ea03b27c6..000000000000 --- a/changelog.d/15229.misc +++ /dev/null @@ -1 +0,0 @@ -Add topic and name events to group of events that are batch persisted when creating a room. diff --git a/changelog.d/15230.misc b/changelog.d/15230.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15230.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15231.misc b/changelog.d/15231.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15231.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15232.bugfix b/changelog.d/15232.bugfix deleted file mode 100644 index d75a4f2d99d5..000000000000 --- a/changelog.d/15232.bugfix +++ /dev/null @@ -1 +0,0 @@ -Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. diff --git a/changelog.d/15235.bugfix b/changelog.d/15235.bugfix deleted file mode 100644 index e6a6bb1b9d08..000000000000 --- a/changelog.d/15235.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing error when sending message into deleted room. diff --git a/changelog.d/15237.misc b/changelog.d/15237.misc deleted file mode 100644 index 9981606c3226..000000000000 --- a/changelog.d/15237.misc +++ /dev/null @@ -1 +0,0 @@ -Move various module API callback registration methods to a dedicated class. \ No newline at end of file diff --git a/changelog.d/15238.misc b/changelog.d/15238.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15238.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15239.docker b/changelog.d/15239.docker deleted file mode 100644 index 2aad329e99b3..000000000000 --- a/changelog.d/15239.docker +++ /dev/null @@ -1 +0,0 @@ -Ensure the Dockerfile builds on platforms that don't have a `cryptography` wheel. diff --git a/changelog.d/15244.misc b/changelog.d/15244.misc deleted file mode 100644 index dacdcf4d5d6f..000000000000 --- a/changelog.d/15244.misc +++ /dev/null @@ -1 +0,0 @@ -Configure GitHub Actions for merge queues. diff --git a/changelog.d/15247.misc b/changelog.d/15247.misc deleted file mode 100644 index 6e2ce1d4d8bd..000000000000 --- a/changelog.d/15247.misc +++ /dev/null @@ -1 +0,0 @@ -Add schema comments about the `destinations` and `destination_rooms` tables. \ No newline at end of file diff --git a/changelog.d/15249.feature b/changelog.d/15249.feature deleted file mode 100644 index 92d48a208723..000000000000 --- a/changelog.d/15249.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. diff --git a/changelog.d/15252.misc b/changelog.d/15252.misc deleted file mode 100644 index cf18d9e84982..000000000000 --- a/changelog.d/15252.misc +++ /dev/null @@ -1 +0,0 @@ -Bump hiredis from 2.2.1 to 2.2.2. diff --git a/changelog.d/15253.misc b/changelog.d/15253.misc deleted file mode 100644 index 4aa75294c728..000000000000 --- a/changelog.d/15253.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde from 1.0.152 to 1.0.155. diff --git a/changelog.d/15254.misc b/changelog.d/15254.misc deleted file mode 100644 index 83287914c317..000000000000 --- a/changelog.d/15254.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pysaml2 from 7.2.1 to 7.3.1. 
diff --git a/changelog.d/15255.misc b/changelog.d/15255.misc deleted file mode 100644 index 8be3b828e96b..000000000000 --- a/changelog.d/15255.misc +++ /dev/null @@ -1 +0,0 @@ -Bump msgpack from 1.0.4 to 1.0.5. diff --git a/changelog.d/15256.misc b/changelog.d/15256.misc deleted file mode 100644 index ecdc2e5d22ee..000000000000 --- a/changelog.d/15256.misc +++ /dev/null @@ -1 +0,0 @@ -Bump gitpython from 3.1.30 to 3.1.31. diff --git a/changelog.d/15257.misc b/changelog.d/15257.misc deleted file mode 100644 index 2c0932233ca2..000000000000 --- a/changelog.d/15257.misc +++ /dev/null @@ -1 +0,0 @@ -Bump cryptography from 39.0.1 to 39.0.2. diff --git a/changelog.d/15262.misc b/changelog.d/15262.misc deleted file mode 100644 index d519f151c454..000000000000 --- a/changelog.d/15262.misc +++ /dev/null @@ -1 +0,0 @@ -Skip processing of auto-join room behaviour if there are not auto-join rooms configured. diff --git a/changelog.d/15266.misc b/changelog.d/15266.misc deleted file mode 100644 index 285b72cdd159..000000000000 --- a/changelog.d/15266.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused store method `_set_destination_retry_timings_emulated`. \ No newline at end of file diff --git a/changelog.d/15268.feature b/changelog.d/15268.feature deleted file mode 100644 index 5f1f1a0f589c..000000000000 --- a/changelog.d/15268.feature +++ /dev/null @@ -1 +0,0 @@ -Allow loading `/register/available` endpoint on workers. diff --git a/changelog.d/15269.misc b/changelog.d/15269.misc deleted file mode 100644 index b3126fb1f4c9..000000000000 --- a/changelog.d/15269.misc +++ /dev/null @@ -1 +0,0 @@ -Reorganize URL preview code. diff --git a/changelog.d/15272.misc b/changelog.d/15272.misc deleted file mode 100644 index f7c0276ecc22..000000000000 --- a/changelog.d/15272.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up direct TCP replication code. diff --git a/changelog.d/15274.misc b/changelog.d/15274.misc deleted file mode 100644 index f7c0276ecc22..000000000000 --- a/changelog.d/15274.misc +++ /dev/null @@ -1 +0,0 @@ -Clean-up direct TCP replication code. diff --git a/changelog.d/15275.misc b/changelog.d/15275.misc deleted file mode 100644 index b222acd72b4b..000000000000 --- a/changelog.d/15275.misc +++ /dev/null @@ -1 +0,0 @@ -Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. \ No newline at end of file diff --git a/changelog.d/15281.docker b/changelog.d/15281.docker deleted file mode 100644 index 6990430ef44d..000000000000 --- a/changelog.d/15281.docker +++ /dev/null @@ -1 +0,0 @@ -Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). \ No newline at end of file diff --git a/changelog.d/15282.docker b/changelog.d/15282.docker deleted file mode 100644 index 6990430ef44d..000000000000 --- a/changelog.d/15282.docker +++ /dev/null @@ -1 +0,0 @@ -Mirror images to the GitHub Container Registry (`ghcr.io/matrix-org/synapse`). \ No newline at end of file diff --git a/changelog.d/15286.misc b/changelog.d/15286.misc deleted file mode 100644 index 48a5b400be41..000000000000 --- a/changelog.d/15286.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pydantic from 1.10.4 to 1.10.6. diff --git a/changelog.d/15287.misc b/changelog.d/15287.misc deleted file mode 100644 index d5b80d108743..000000000000 --- a/changelog.d/15287.misc +++ /dev/null @@ -1 +0,0 @@ -Bump serde from 1.0.155 to 1.0.157. 
diff --git a/changelog.d/15288.misc b/changelog.d/15288.misc deleted file mode 100644 index 908b590595cd..000000000000 --- a/changelog.d/15288.misc +++ /dev/null @@ -1 +0,0 @@ -Bump anyhow from 1.0.69 to 1.0.70. diff --git a/changelog.d/15289.misc b/changelog.d/15289.misc deleted file mode 100644 index 1a3921af8417..000000000000 --- a/changelog.d/15289.misc +++ /dev/null @@ -1 +0,0 @@ -Bump txredisapi from 1.4.7 to 1.4.9. diff --git a/changelog.d/15290.misc b/changelog.d/15290.misc deleted file mode 100644 index f23043874fb0..000000000000 --- a/changelog.d/15290.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pygithub from 1.57 to 1.58.1. diff --git a/changelog.d/15291.misc b/changelog.d/15291.misc deleted file mode 100644 index 74de7a2065af..000000000000 --- a/changelog.d/15291.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.28.11.12 to 2.28.11.15. diff --git a/changelog.d/15293.misc b/changelog.d/15293.misc deleted file mode 100644 index 57447956204a..000000000000 --- a/changelog.d/15293.misc +++ /dev/null @@ -1 +0,0 @@ -Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). diff --git a/debian/changelog b/debian/changelog index 10f4815d351d..32df14add676 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium + + * New Synapse release 1.80.0rc1. + + -- Synapse Packaging team Tue, 21 Mar 2023 10:56:08 -0700 + matrix-synapse-py3 (1.79.0) stable; urgency=medium * New Synapse release 1.79.0. diff --git a/pyproject.toml b/pyproject.toml index 2df931653c46..0ba5c9480a3f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.79.0" +version = "1.80.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 1e1c220084e7f60e575a50e4e2d925d1181c5ccf Mon Sep 17 00:00:00 2001 From: Shay Date: Tue, 21 Mar 2023 10:59:41 -0700 Subject: [PATCH 341/344] Update CHANGES.md --- CHANGES.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 5d0f66194292..2746e4f11227 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -15,7 +15,7 @@ Bugfixes - Fix a long-standing bug in which the user directory would assume any remote membership state events represent a profile change. ([\#14755](https://github.com/matrix-org/synapse/issues/14755), [\#14756](https://github.com/matrix-org/synapse/issues/14756)) - Implement [MSC3873](https://github.com/matrix-org/matrix-spec-proposals/pull/3873) to fix a long-standing bug where properties with dots were handled ambiguously in push rules. ([\#15190](https://github.com/matrix-org/synapse/issues/15190)) - Faster joins: Fix a bug introduced in Synapse 1.66 where spurious "Failed to find memberships ..." errors would be logged. ([\#15232](https://github.com/matrix-org/synapse/issues/15232)) -- Fix long-standing error when sending message into deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235)) +- Fix a long-standing error when sending message into deleted room. ([\#15235](https://github.com/matrix-org/synapse/issues/15235)) Updates to the Docker image From 9f5d7d5ba20bfa767089bae451e3b31dd347438f Mon Sep 17 00:00:00 2001 From: "H. 
Shay" Date: Tue, 21 Mar 2023 11:07:37 -0700 Subject: [PATCH 342/344] update changelog --- CHANGES.md | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5d0f66194292..a2d9ad090b91 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -7,6 +7,8 @@ Features - Stabilise support for [MSC3966](https://github.com/matrix-org/matrix-spec-proposals/pull/3966): `event_property_contains` push condition. ([\#15187](https://github.com/matrix-org/synapse/issues/15187)) - Implement [MSC2659](https://github.com/matrix-org/matrix-spec-proposals/pull/2659): application service ping endpoint. Contributed by Tulir @ Beeper. ([\#15249](https://github.com/matrix-org/synapse/issues/15249)) - Allow loading `/register/available` endpoint on workers. ([\#15268](https://github.com/matrix-org/synapse/issues/15268)) +- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195)) +- Add topic and name events to group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229)) Bugfixes @@ -36,32 +38,31 @@ Internal Changes - Add additional functionality to declaring worker types when starting Complement in worker mode. ([\#14921](https://github.com/matrix-org/synapse/issues/14921)) - Add `Synapse-Trace-Id` to `access-control-expose-headers` header. ([\#14974](https://github.com/matrix-org/synapse/issues/14974)) -- Improve performance of creating and authenticating events. ([\#15195](https://github.com/matrix-org/synapse/issues/15195)) - Make the `HttpTransactionCache` use the `Requester` in addition of the just the `Request` to build the transaction key. ([\#15200](https://github.com/matrix-org/synapse/issues/15200)) - Improve log lines when purging rooms. ([\#15222](https://github.com/matrix-org/synapse/issues/15222)) -- Add topic and name events to group of events that are batch persisted when creating a room. ([\#15229](https://github.com/matrix-org/synapse/issues/15229)) - Improve type hints. ([\#15230](https://github.com/matrix-org/synapse/issues/15230), [\#15231](https://github.com/matrix-org/synapse/issues/15231), [\#15238](https://github.com/matrix-org/synapse/issues/15238)) - Move various module API callback registration methods to a dedicated class. ([\#15237](https://github.com/matrix-org/synapse/issues/15237)) - Configure GitHub Actions for merge queues. ([\#15244](https://github.com/matrix-org/synapse/issues/15244)) - Add schema comments about the `destinations` and `destination_rooms` tables. ([\#15247](https://github.com/matrix-org/synapse/issues/15247)) +- Skip processing of auto-join room behaviour if there are no auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262)) +- Remove unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266)) +- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269)) +- Clean-up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274)) +- Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275)) +- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). 
([\#15293](https://github.com/matrix-org/synapse/issues/15293)) - Bump hiredis from 2.2.1 to 2.2.2. ([\#15252](https://github.com/matrix-org/synapse/issues/15252)) - Bump serde from 1.0.152 to 1.0.155. ([\#15253](https://github.com/matrix-org/synapse/issues/15253)) - Bump pysaml2 from 7.2.1 to 7.3.1. ([\#15254](https://github.com/matrix-org/synapse/issues/15254)) - Bump msgpack from 1.0.4 to 1.0.5. ([\#15255](https://github.com/matrix-org/synapse/issues/15255)) - Bump gitpython from 3.1.30 to 3.1.31. ([\#15256](https://github.com/matrix-org/synapse/issues/15256)) - Bump cryptography from 39.0.1 to 39.0.2. ([\#15257](https://github.com/matrix-org/synapse/issues/15257)) -- Skip processing of auto-join room behaviour if there are not auto-join rooms configured. ([\#15262](https://github.com/matrix-org/synapse/issues/15262)) -- Remove unused store method `_set_destination_retry_timings_emulated`. ([\#15266](https://github.com/matrix-org/synapse/issues/15266)) -- Reorganize URL preview code. ([\#15269](https://github.com/matrix-org/synapse/issues/15269)) -- Clean-up direct TCP replication code. ([\#15272](https://github.com/matrix-org/synapse/issues/15272), [\#15274](https://github.com/matrix-org/synapse/issues/15274)) -- Make `configure_workers_and_start` script used in Complement tests compatible with older versions of Python. ([\#15275](https://github.com/matrix-org/synapse/issues/15275)) - Bump pydantic from 1.10.4 to 1.10.6. ([\#15286](https://github.com/matrix-org/synapse/issues/15286)) - Bump serde from 1.0.155 to 1.0.157. ([\#15287](https://github.com/matrix-org/synapse/issues/15287)) - Bump anyhow from 1.0.69 to 1.0.70. ([\#15288](https://github.com/matrix-org/synapse/issues/15288)) - Bump txredisapi from 1.4.7 to 1.4.9. ([\#15289](https://github.com/matrix-org/synapse/issues/15289)) - Bump pygithub from 1.57 to 1.58.1. ([\#15290](https://github.com/matrix-org/synapse/issues/15290)) - Bump types-requests from 2.28.11.12 to 2.28.11.15. ([\#15291](https://github.com/matrix-org/synapse/issues/15291)) -- Add a `/versions` flag for [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15293](https://github.com/matrix-org/synapse/issues/15293)) + Synapse 1.79.0 (2023-03-14) From 7655bc054296ced0810d0a649ac7993c9443658b Mon Sep 17 00:00:00 2001 From: "H. Shay" Date: Wed, 22 Mar 2023 08:30:23 -0700 Subject: [PATCH 343/344] 1.80.0rc2 --- CHANGES.md | 11 +++++++++++ changelog.d/15298.bugfix | 1 - changelog.d/15300.bugfix | 1 - changelog.d/15301.bugfix | 3 --- debian/changelog | 6 ++++++ pyproject.toml | 2 +- 6 files changed, 18 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/15298.bugfix delete mode 100644 changelog.d/15300.bugfix delete mode 100644 changelog.d/15301.bugfix diff --git a/CHANGES.md b/CHANGES.md index 1d15bacb9085..88cb2bdf259a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,14 @@ +Synapse 1.80.0rc2 (2023-03-22) +============================== + +Bugfixes +-------- + +- Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). 
([\#15298](https://github.com/matrix-org/synapse/issues/15298), [\#15300](https://github.com/matrix-org/synapse/issues/15300)) +- Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite) + would fail to open the SQLite database. ([\#15301](https://github.com/matrix-org/synapse/issues/15301)) + + Synapse 1.80.0rc1 (2023-03-21) ============================== diff --git a/changelog.d/15298.bugfix b/changelog.d/15298.bugfix deleted file mode 100644 index 8f29b08444fb..000000000000 --- a/changelog.d/15298.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). \ No newline at end of file diff --git a/changelog.d/15300.bugfix b/changelog.d/15300.bugfix deleted file mode 100644 index 8f29b08444fb..000000000000 --- a/changelog.d/15300.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in which the [`POST /_matrix/client/v3/rooms/{roomId}/report/{eventId}`](https://spec.matrix.org/v1.6/client-server-api/#post_matrixclientv3roomsroomidreporteventid) endpoint would return the wrong error if the user did not have permission to view the event. This aligns Synapse's implementation with [MSC2249](https://github.com/matrix-org/matrix-spec-proposals/pull/2249). \ No newline at end of file diff --git a/changelog.d/15301.bugfix b/changelog.d/15301.bugfix deleted file mode 100644 index c8f3628d3493..000000000000 --- a/changelog.d/15301.bugfix +++ /dev/null @@ -1,3 +0,0 @@ -Fix a bug introduced in Synapse 1.75.0rc1 where the [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite) -would fail to open the SQLite database. - diff --git a/debian/changelog b/debian/changelog index 32df14add676..ca231603087b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium + + * New Synapse release 1.80.0rc2. + + -- Synapse Packaging team Wed, 22 Mar 2023 08:30:16 -0700 + matrix-synapse-py3 (1.80.0~rc1) stable; urgency=medium * New Synapse release 1.80.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 0ba5c9480a3f..19dc7c1536cf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.80.0rc1" +version = "1.80.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" From 9f7d6c6bc1b414d8f6591cc1d312a9c6b3a28980 Mon Sep 17 00:00:00 2001 From: Sean Quah Date: Tue, 28 Mar 2023 11:10:59 +0100 Subject: [PATCH 344/344] 1.80.0 --- CHANGES.md | 6 ++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 88cb2bdf259a..5f2a4a41eb42 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,9 @@ +Synapse 1.80.0 (2023-03-28) +=========================== + +No significant changes since 1.80.0rc2. 
+ + Synapse 1.80.0rc2 (2023-03-22) ============================== diff --git a/debian/changelog b/debian/changelog index ca231603087b..98366d49166b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.80.0) stable; urgency=medium + + * New Synapse release 1.80.0. + + -- Synapse Packaging team Tue, 28 Mar 2023 11:10:33 +0100 + matrix-synapse-py3 (1.80.0~rc2) stable; urgency=medium * New Synapse release 1.80.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 19dc7c1536cf..05bf599703b0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.80.0rc2" +version = "1.80.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0"