diff --git a/CHANGES.md b/CHANGES.md index b2cc138ee302..f055772ca070 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,56 @@ +Synapse 1.83.0rc1 (2023-05-02) +============================== + +Features +-------- + +- Experimental support to recursively provide relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). ([\#15315](https://github.com/matrix-org/synapse/issues/15315)) +- Experimental support for [MSC3970](https://github.com/matrix-org/matrix-spec-proposals/pull/3970): Scope transaction IDs to devices. ([\#15318](https://github.com/matrix-org/synapse/issues/15318)) +- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/experimental_features.html) to support per-user feature flags. ([\#15344](https://github.com/matrix-org/synapse/issues/15344)) +- Add a module API to send an HTTP push notification. ([\#15387](https://github.com/matrix-org/synapse/issues/15387)) +- Add an [admin API endpoint](https://matrix-org.github.io/synapse/v1.83/admin_api/statistics.html#get-largest-rooms-by-size-in-database) to query the largest rooms by disk space used in the database. ([\#15482](https://github.com/matrix-org/synapse/issues/15482)) + + +Bugfixes +-------- + +- Disable push rule evaluation for rooms excluded from sync. ([\#15361](https://github.com/matrix-org/synapse/issues/15361)) +- Fix a long-standing bug where cached server key results which were directly fetched would not be properly re-used. ([\#15417](https://github.com/matrix-org/synapse/issues/15417)) +- Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. ([\#15494](https://github.com/matrix-org/synapse/issues/15494)) + + +Improved Documentation +---------------------- + +- Add Nginx loadbalancing example with sticky mxid for workers. ([\#15411](https://github.com/matrix-org/synapse/issues/15411)) +- Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. ([\#15498](https://github.com/matrix-org/synapse/issues/15498)) + + +Internal Changes +---------------- + +- Speedup tests by caching HomeServerConfig instances. ([\#15284](https://github.com/matrix-org/synapse/issues/15284)) +- Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). ([\#15356](https://github.com/matrix-org/synapse/issues/15356)) +- Always use multi-user device resync replication endpoints. ([\#15418](https://github.com/matrix-org/synapse/issues/15418)) +- Add column `full_user_id` to tables `profiles` and `user_filters`. ([\#15458](https://github.com/matrix-org/synapse/issues/15458)) +- Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback-keys in a `/keys/claim` request. ([\#15462](https://github.com/matrix-org/synapse/issues/15462)) +- Improve type hints. ([\#15465](https://github.com/matrix-org/synapse/issues/15465), [\#15496](https://github.com/matrix-org/synapse/issues/15496), [\#15497](https://github.com/matrix-org/synapse/issues/15497)) +- Support claiming more than one OTK at a time. ([\#15468](https://github.com/matrix-org/synapse/issues/15468)) +- Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. ([\#15471](https://github.com/matrix-org/synapse/issues/15471)) +- Bump pyasn1-modules from 0.2.8 to 0.3.0. ([\#15473](https://github.com/matrix-org/synapse/issues/15473)) +- Bump cryptography from 40.0.1 to 40.0.2. 
([\#15474](https://github.com/matrix-org/synapse/issues/15474)) +- Bump types-netaddr from 0.8.0.7 to 0.8.0.8. ([\#15475](https://github.com/matrix-org/synapse/issues/15475)) +- Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. ([\#15476](https://github.com/matrix-org/synapse/issues/15476)) +- Ask bug reporters to provide logs as text. ([\#15479](https://github.com/matrix-org/synapse/issues/15479)) +- Add a Nix flake for use as a development environment. ([\#15495](https://github.com/matrix-org/synapse/issues/15495)) +- Bump anyhow from 1.0.70 to 1.0.71. ([\#15507](https://github.com/matrix-org/synapse/issues/15507)) +- Bump types-pillow from 9.4.0.19 to 9.5.0.2. ([\#15508](https://github.com/matrix-org/synapse/issues/15508)) +- Bump packaging from 23.0 to 23.1. ([\#15510](https://github.com/matrix-org/synapse/issues/15510)) +- Bump types-requests from 2.28.11.16 to 2.29.0.0. ([\#15511](https://github.com/matrix-org/synapse/issues/15511)) +- Bump setuptools-rust from 1.5.2 to 1.6.0. ([\#15512](https://github.com/matrix-org/synapse/issues/15512)) +- Update the check_schema_delta script to account for when the schema version has been bumped locally. ([\#15466](https://github.com/matrix-org/synapse/issues/15466)) + + Synapse 1.82.0 (2023-04-25) =========================== diff --git a/changelog.d/15025.misc b/changelog.d/15025.misc new file mode 100644 index 000000000000..1f04d8572993 --- /dev/null +++ b/changelog.d/15025.misc @@ -0,0 +1 @@ +Use oEmbed to generate URL previews for YouTube Shorts. diff --git a/changelog.d/15224.feature b/changelog.d/15224.feature new file mode 100644 index 000000000000..5d8413f8be2b --- /dev/null +++ b/changelog.d/15224.feature @@ -0,0 +1 @@ +Add `forget_rooms_on_leave` config option to automatically forget rooms when users leave them or are removed from them. diff --git a/changelog.d/15284.misc b/changelog.d/15284.misc deleted file mode 100644 index 99d753f8f051..000000000000 --- a/changelog.d/15284.misc +++ /dev/null @@ -1 +0,0 @@ -Speedup tests by caching HomeServerConfig instances. diff --git a/changelog.d/15315.feature b/changelog.d/15315.feature deleted file mode 100644 index 30b2abdc62f6..000000000000 --- a/changelog.d/15315.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support to recursively provide relations per [MSC3981](https://github.com/matrix-org/matrix-spec-proposals/pull/3981). diff --git a/changelog.d/15318.feature b/changelog.d/15318.feature deleted file mode 100644 index 47bb2e17a7ca..000000000000 --- a/changelog.d/15318.feature +++ /dev/null @@ -1 +0,0 @@ -Experimental support for MSC3970: Scope transaction IDs to devices. diff --git a/changelog.d/15344.feature b/changelog.d/15344.feature deleted file mode 100644 index 44262e9bd87c..000000000000 --- a/changelog.d/15344.feature +++ /dev/null @@ -1 +0,0 @@ -Add an admin API endpoint to support per-user feature flags. diff --git a/changelog.d/15356.misc b/changelog.d/15356.misc deleted file mode 100644 index c09911e48d2f..000000000000 --- a/changelog.d/15356.misc +++ /dev/null @@ -1 +0,0 @@ -Add denormalised event stream ordering column to membership state tables for future use. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/15361.bugfix b/changelog.d/15361.bugfix deleted file mode 100644 index 2cd795e5766a..000000000000 --- a/changelog.d/15361.bugfix +++ /dev/null @@ -1 +0,0 @@ -Disable push rule evaluation for rooms excluded from sync. 
\ No newline at end of file diff --git a/changelog.d/15387.feature b/changelog.d/15387.feature deleted file mode 100644 index b36e33152049..000000000000 --- a/changelog.d/15387.feature +++ /dev/null @@ -1 +0,0 @@ -Add a module API to send an HTTP push notification. diff --git a/changelog.d/15411.doc b/changelog.d/15411.doc deleted file mode 100644 index c23a8df04a71..000000000000 --- a/changelog.d/15411.doc +++ /dev/null @@ -1 +0,0 @@ -Docs: Add Nginx loadbalancing example with sticky mxid for workers. diff --git a/changelog.d/15417.bugfix b/changelog.d/15417.bugfix deleted file mode 100644 index 300635cbdc11..000000000000 --- a/changelog.d/15417.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where cached key results which were directly fetched would not be properly re-used. diff --git a/changelog.d/15418.misc b/changelog.d/15418.misc deleted file mode 100644 index ca6f995a9c70..000000000000 --- a/changelog.d/15418.misc +++ /dev/null @@ -1 +0,0 @@ -Always use multi-user device resync replication endpoints. \ No newline at end of file diff --git a/changelog.d/15437.misc b/changelog.d/15437.misc new file mode 100644 index 000000000000..2dea23784f22 --- /dev/null +++ b/changelog.d/15437.misc @@ -0,0 +1 @@ +Make the `thread_id` column on `event_push_actions`, `event_push_actions_staging`, and `event_push_summary` non-null. diff --git a/changelog.d/15458.misc b/changelog.d/15458.misc deleted file mode 100644 index 5183161d2573..000000000000 --- a/changelog.d/15458.misc +++ /dev/null @@ -1 +0,0 @@ -Add column `full_user_id` to tables `profiles` and `user_filters`. diff --git a/changelog.d/15462.misc b/changelog.d/15462.misc deleted file mode 100644 index 36e4bffbc86b..000000000000 --- a/changelog.d/15462.misc +++ /dev/null @@ -1 +0,0 @@ -Update support for [MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) to allow always returning fallback-keys in a `/keys/claim` request. diff --git a/changelog.d/15465.misc b/changelog.d/15465.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15465.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15466.bugfix b/changelog.d/15466.bugfix deleted file mode 100644 index 299a7f0f0eba..000000000000 --- a/changelog.d/15466.bugfix +++ /dev/null @@ -1 +0,0 @@ -Update the check_schema_delta script to account for when the schema version has been bumped locally. diff --git a/changelog.d/15468.misc b/changelog.d/15468.misc deleted file mode 100644 index e0a94f36fdf4..000000000000 --- a/changelog.d/15468.misc +++ /dev/null @@ -1 +0,0 @@ -Support claiming more than one OTK at a time. diff --git a/changelog.d/15471.misc b/changelog.d/15471.misc deleted file mode 100644 index c5b16174c5f7..000000000000 --- a/changelog.d/15471.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pyyaml from 6.0.12.8 to 6.0.12.9. diff --git a/changelog.d/15473.misc b/changelog.d/15473.misc deleted file mode 100644 index 9540fbc1d2c0..000000000000 --- a/changelog.d/15473.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pyasn1-modules from 0.2.8 to 0.3.0. diff --git a/changelog.d/15474.misc b/changelog.d/15474.misc deleted file mode 100644 index 61f4abd515f4..000000000000 --- a/changelog.d/15474.misc +++ /dev/null @@ -1 +0,0 @@ -Bump cryptography from 40.0.1 to 40.0.2. diff --git a/changelog.d/15475.misc b/changelog.d/15475.misc deleted file mode 100644 index c86462652c9a..000000000000 --- a/changelog.d/15475.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-netaddr from 0.8.0.7 to 0.8.0.8. 
diff --git a/changelog.d/15476.misc b/changelog.d/15476.misc deleted file mode 100644 index 2ff909dffb93..000000000000 --- a/changelog.d/15476.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-jsonschema from 4.17.0.6 to 4.17.0.7. diff --git a/changelog.d/15479.misc b/changelog.d/15479.misc deleted file mode 100644 index 482addfeb7b6..000000000000 --- a/changelog.d/15479.misc +++ /dev/null @@ -1 +0,0 @@ -Ask bug reporters to provide logs as text. diff --git a/changelog.d/15482.feature b/changelog.d/15482.feature deleted file mode 100644 index f3e9f2a5b234..000000000000 --- a/changelog.d/15482.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin endpoint to query the largest rooms by disk space used in the database. diff --git a/changelog.d/15494.bugfix b/changelog.d/15494.bugfix deleted file mode 100644 index 89f9262200c9..000000000000 --- a/changelog.d/15494.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse 1.73.0 where some experimental push rules were returned by default. diff --git a/changelog.d/15495.misc b/changelog.d/15495.misc deleted file mode 100644 index ff7b5cbddf3b..000000000000 --- a/changelog.d/15495.misc +++ /dev/null @@ -1 +0,0 @@ -Add a Nix flake for use as a development environment. \ No newline at end of file diff --git a/changelog.d/15496.misc b/changelog.d/15496.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15496.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15497.misc b/changelog.d/15497.misc deleted file mode 100644 index 93ceaeafc9b9..000000000000 --- a/changelog.d/15497.misc +++ /dev/null @@ -1 +0,0 @@ -Improve type hints. diff --git a/changelog.d/15498.doc b/changelog.d/15498.doc deleted file mode 100644 index 78715b8a3eb6..000000000000 --- a/changelog.d/15498.doc +++ /dev/null @@ -1 +0,0 @@ -Update outdated development docs that mention restrictions in versions of SQLite that we no longer support. diff --git a/changelog.d/15507.misc b/changelog.d/15507.misc deleted file mode 100644 index 061f19e1c940..000000000000 --- a/changelog.d/15507.misc +++ /dev/null @@ -1 +0,0 @@ -Bump anyhow from 1.0.70 to 1.0.71. diff --git a/changelog.d/15508.misc b/changelog.d/15508.misc deleted file mode 100644 index 3f3f5d98f980..000000000000 --- a/changelog.d/15508.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-pillow from 9.4.0.19 to 9.5.0.2. diff --git a/changelog.d/15509.misc b/changelog.d/15509.misc new file mode 100644 index 000000000000..1eb26c83b7c6 --- /dev/null +++ b/changelog.d/15509.misc @@ -0,0 +1 @@ +Bump pyicu from 2.10.2 to 2.11. diff --git a/changelog.d/15510.misc b/changelog.d/15510.misc deleted file mode 100644 index bff25e824b0b..000000000000 --- a/changelog.d/15510.misc +++ /dev/null @@ -1 +0,0 @@ -Bump packaging from 23.0 to 23.1. diff --git a/changelog.d/15511.misc b/changelog.d/15511.misc deleted file mode 100644 index 183e0c69d20d..000000000000 --- a/changelog.d/15511.misc +++ /dev/null @@ -1 +0,0 @@ -Bump types-requests from 2.28.11.16 to 2.29.0.0. diff --git a/changelog.d/15512.misc b/changelog.d/15512.misc deleted file mode 100644 index 90bdf170d3e9..000000000000 --- a/changelog.d/15512.misc +++ /dev/null @@ -1 +0,0 @@ -Bump setuptools-rust from 1.5.2 to 1.6.0. diff --git a/changelog.d/15514.misc b/changelog.d/15514.misc deleted file mode 100644 index f24c18cf479b..000000000000 --- a/changelog.d/15514.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce the size of the HTTP connection pool for non-pushers. 
diff --git a/changelog.d/15522.misc b/changelog.d/15522.misc new file mode 100644 index 000000000000..a5a229e4a092 --- /dev/null +++ b/changelog.d/15522.misc @@ -0,0 +1 @@ +Remove references to supporting per-user flag for [MSC2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654) (#15522). diff --git a/changelog.d/15527.misc b/changelog.d/15527.misc new file mode 100644 index 000000000000..752a32adeb5d --- /dev/null +++ b/changelog.d/15527.misc @@ -0,0 +1 @@ +Don't use a trusted key server when running the demo scripts. \ No newline at end of file diff --git a/changelog.d/15529.misc b/changelog.d/15529.misc new file mode 100644 index 000000000000..7ad424d8dfd0 --- /dev/null +++ b/changelog.d/15529.misc @@ -0,0 +1 @@ +Speed up rebuilding of the user directory for local users. diff --git a/changelog.d/15531.misc b/changelog.d/15531.misc new file mode 100644 index 000000000000..6d4da961b520 --- /dev/null +++ b/changelog.d/15531.misc @@ -0,0 +1 @@ +Speed up deleting of old rows in `event_push_actions`. diff --git a/changelog.d/15532.misc b/changelog.d/15532.misc new file mode 100644 index 000000000000..e58273f2977d --- /dev/null +++ b/changelog.d/15532.misc @@ -0,0 +1 @@ +Install the `xmlsec` package and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. diff --git a/changelog.d/15533.misc b/changelog.d/15533.misc new file mode 100644 index 000000000000..8ed23526ef01 --- /dev/null +++ b/changelog.d/15533.misc @@ -0,0 +1 @@ +Install the `xmlsec` package and switch back to the upstream [cachix/devenv](https://github.com/cachix/devenv) repo in the nix development environment. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index f6e8720e5894..a50fda69a8f1 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.83.0~rc1) stable; urgency=medium + + * New Synapse release 1.83.0rc1. + + -- Synapse Packaging team Tue, 02 May 2023 15:56:38 +0100 + matrix-synapse-py3 (1.82.0) stable; urgency=medium * New Synapse release 1.82.0. diff --git a/demo/start.sh b/demo/start.sh index fdd75816fb26..06ec6f985f35 100755 --- a/demo/start.sh +++ b/demo/start.sh @@ -46,7 +46,7 @@ for port in 8080 8081 8082; do echo '' # Warning, this heredoc depends on the interaction of tabs and spaces. - # Please don't accidentaly bork me with your fancy settings. + # Please don't accidentally bork me with your fancy settings. listeners=$(cat <<-PORTLISTENERS # Configure server to listen on both $https_port and $port # This overides some of the default settings above @@ -80,12 +80,8 @@ for port in 8080 8081 8082; do echo "tls_certificate_path: \"$DIR/$port/localhost:$port.tls.crt\"" echo "tls_private_key_path: \"$DIR/$port/localhost:$port.tls.key\"" - # Ignore keys from the trusted keys server - echo '# Ignore keys from the trusted keys server' - echo 'trusted_key_servers:' - echo ' - server_name: "matrix.org"' - echo ' accept_keys_insecurely: true' - echo '' + # Request keys directly from servers contacted over federation + echo 'trusted_key_servers: []' # Allow the servers to communicate over localhost. 
allow_list=$(cat <<-ALLOW_LIST diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index ade77d49261c..a8e5ddad9d48 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -57,6 +57,7 @@ - [Account Validity](admin_api/account_validity.md) - [Background Updates](usage/administration/admin_api/background_updates.md) - [Event Reports](admin_api/event_reports.md) + - [Experimental Features](admin_api/experimental_features.md) - [Media](admin_api/media_admin_api.md) - [Purge History](admin_api/purge_history_api.md) - [Register Users](admin_api/register_api.md) diff --git a/docs/admin_api/experimental_features.md b/docs/admin_api/experimental_features.md index c1aebe4b01a8..07b630915d59 100644 --- a/docs/admin_api/experimental_features.md +++ b/docs/admin_api/experimental_features.md @@ -1,10 +1,12 @@ # Experimental Features API This API allows a server administrator to enable or disable some experimental features on a per-user -basis. Currently supported features are [msc3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy -presence state enabled, [msc2654](https://github.com/matrix-org/matrix-spec-proposals/pull/2654): enable unread counts, -[msc3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications -for another client, and [msc3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require +basis. The currently supported features are: +- [MSC3026](https://github.com/matrix-org/matrix-spec-proposals/pull/3026): busy +presence state enabled +- [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications +for another client +- [MSC3967](https://github.com/matrix-org/matrix-spec-proposals/pull/3967): do not require UIA when first uploading cross-signing keys. @@ -19,7 +21,7 @@ provide a body containing the user id and listing the features to enable/disable { "features": { "msc3026":true, - "msc2654":true + "msc3881":true } } ``` @@ -46,7 +48,6 @@ user like so: { "features": { "msc3026": true, - "msc2654": true, "msc3881": false, "msc3967": false } diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index b6516191e813..14c21f73fe5b 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3699,6 +3699,16 @@ default_power_level_content_override: trusted_private_chat: null public_chat: null ``` +--- +### `forget_rooms_on_leave` + +Set to true to automatically forget rooms for users when they leave them, either +normally or via a kick or ban. Defaults to false. 
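As a quick illustration of the per-user flags API documented above, here is a minimal sketch using `requests`; the homeserver URL, admin token, and user ID are placeholder assumptions, and the endpoint path is the one given on the linked admin API page:

```python
import requests

BASE = "https://synapse.example.com"  # assumption: your homeserver's base URL
HEADERS = {"Authorization": "Bearer <admin_access_token>"}  # assumption: a server admin's token
url = f"{BASE}/_synapse/admin/v1/experimental_features/@alice:example.com"

# Enable MSC3026 (busy presence) and MSC3881 (remote push toggling) for one user.
resp = requests.put(url, headers=HEADERS, json={"features": {"msc3026": True, "msc3881": True}})
resp.raise_for_status()

# Read the flags back; note that msc2654 no longer appears now that its
# per-user flag has been removed by this change.
print(requests.get(url, headers=HEADERS).json())
```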
+ +Example configuration: +```yaml +forget_rooms_on_leave: false +``` --- ## Opentracing diff --git a/flake.lock b/flake.lock index 85886b730f54..d1c933e9aa01 100644 --- a/flake.lock +++ b/flake.lock @@ -8,16 +8,16 @@ "pre-commit-hooks": "pre-commit-hooks" }, "locked": { - "lastModified": 1682534083, - "narHash": "sha256-lBgFaLNHRQtD3InZbBXzIS8HgZUgcPJ6jiqGa4FJPrk=", - "owner": "anoadragon453", + "lastModified": 1683102061, + "narHash": "sha256-kOphT6V0uQUlFNBP3GBjs7DAU7fyZGGqCs9ue1gNY6E=", + "owner": "cachix", "repo": "devenv", - "rev": "9694bd0a845dd184d4468cc3d3461089aace787a", + "rev": "ff1f29e41756553174d596cafe3a9fa77595100b", "type": "github" }, "original": { - "owner": "anoadragon453", - "ref": "anoa/fix_languages_python", + "owner": "cachix", + "ref": "main", "repo": "devenv", "type": "github" } diff --git a/flake.nix b/flake.nix index 91916d9abb51..76243d060129 100644 --- a/flake.nix +++ b/flake.nix @@ -50,11 +50,7 @@ # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS). systems.url = "github:nix-systems/default"; # A development environment manager built on Nix. See https://devenv.sh. - # This is temporarily overridden to a fork that fixes a quirk between - # devenv's service and python language features. This can be removed - # when https://github.com/cachix/devenv/pull/559 is merged upstream. - devenv.url = "github:anoadragon453/devenv/anoa/fix_languages_python"; - #devenv.url = "github:cachix/devenv/main"; + devenv.url = "github:cachix/devenv/main"; # Rust toolchains and rust-analyzer nightly. fenix = { url = "github:nix-community/fenix"; @@ -97,6 +93,7 @@ # Native dependencies for unit tests (SyTest also requires OpenSSL). openssl + xmlsec # Native dependencies for running Complement. olm diff --git a/poetry.lock b/poetry.lock index 2ca6d89cb614..8e82fddebae9 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1973,13 +1973,13 @@ plugins = ["importlib-metadata"] [[package]] name = "pyicu" -version = "2.10.2" +version = "2.11" description = "Python extension wrapping the ICU C++ API" category = "main" optional = true python-versions = "*" files = [ - {file = "PyICU-2.10.2.tar.gz", hash = "sha256:0c3309eea7fab6857507ace62403515b60fe096cbfb4f90d14f55ff75c5441c1"}, + {file = "PyICU-2.11.tar.gz", hash = "sha256:3ab531264cfe9132b3d2ac5d708da9a4649d25f6e6813730ac88cf040a08a844"}, ] [[package]] diff --git a/pyproject.toml b/pyproject.toml index c08352e4d3fc..caf69cc53f18 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.82.0" +version = "1.83.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" diff --git a/synapse/config/room.py b/synapse/config/room.py index 4a7ac0054086..b6696cd129c3 100644 --- a/synapse/config/room.py +++ b/synapse/config/room.py @@ -75,3 +75,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: % preset ) # We validate the actual overrides when we try to apply them. + + # When enabled, users will forget rooms when they leave them, either via a + # leave, kick or ban. 
+ self.forget_on_leave = config.get("forget_rooms_on_leave", False) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index ed805d6ec87e..fbef600acd8f 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -16,7 +16,7 @@ import logging import random from http import HTTPStatus -from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple from synapse import types from synapse.api.constants import ( @@ -38,7 +38,10 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN +from synapse.handlers.state_deltas import MatchChange, StateDeltasHandler from synapse.logging import opentracing +from synapse.metrics import event_processing_positions +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.module_api import NOT_SPAM from synapse.types import ( JsonDict, @@ -280,9 +283,25 @@ async def _user_left_room(self, target: UserID, room_id: str) -> None: """ raise NotImplementedError() - @abc.abstractmethod async def forget(self, user: UserID, room_id: str) -> None: - raise NotImplementedError() + user_id = user.to_string() + + member = await self._storage_controllers.state.get_current_state_event( + room_id=room_id, event_type=EventTypes.Member, state_key=user_id + ) + membership = member.membership if member else None + + if membership is not None and membership not in [ + Membership.LEAVE, + Membership.BAN, + ]: + raise SynapseError(400, "User %s in room %s" % (user_id, room_id)) + + # In normal case this call is only required if `membership` is not `None`. + # But: After the last member had left the room, the background update + # `_background_remove_left_rooms` is deleting rows related to this room from + # the table `current_state_events` and `get_current_state_events` is `None`. + await self.store.forget(user_id, room_id) async def ratelimit_multiple_invites( self, @@ -2046,25 +2065,141 @@ async def _user_left_room(self, target: UserID, room_id: str) -> None: """Implements RoomMemberHandler._user_left_room""" user_left_room(self.distributor, target, room_id) - async def forget(self, user: UserID, room_id: str) -> None: - user_id = user.to_string() - member = await self._storage_controllers.state.get_current_state_event( - room_id=room_id, event_type=EventTypes.Member, state_key=user_id - ) - membership = member.membership if member else None +class RoomForgetterHandler(StateDeltasHandler): + """Forgets rooms when they are left, when enabled in the homeserver config. - if membership is not None and membership not in [ - Membership.LEAVE, - Membership.BAN, - ]: - raise SynapseError(400, "User %s in room %s" % (user_id, room_id)) + For the purposes of this feature, kicks, bans and "leaves" via state resolution + weirdness are all considered to be leaves. - # In normal case this call is only required if `membership` is not `None`. - # But: After the last member had left the room, the background update - # `_background_remove_left_rooms` is deleting rows related to this room from - # the table `current_state_events` and `get_current_state_events` is `None`. - await self.store.forget(user_id, room_id) + Derived from `StatsHandler` and `UserDirectoryHandler`. 
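The refactored `forget` above now also runs on workers (the worker-only stub is deleted later in this diff) and refuses to act while the user is still joined. From a client's perspective that behaviour looks like this hedged sketch, with a placeholder URL, token, and room ID:

```python
import requests

BASE = "https://synapse.example.com"  # assumption
HEADERS = {"Authorization": "Bearer <user_access_token>"}  # assumption
room_id = "!room:example.com"

# /forget only succeeds once the membership is leave or ban; while still
# joined, Synapse answers 400 ("User ... in room ...") per the check above.
resp = requests.post(f"{BASE}/_matrix/client/v3/rooms/{room_id}/forget", headers=HEADERS, json={})
if resp.status_code == 400:
    print("Still in the room - leave before forgetting:", resp.json())
else:
    resp.raise_for_status()
```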
+ """ + + def __init__(self, hs: "HomeServer"): + super().__init__(hs) + + self._hs = hs + self._store = hs.get_datastores().main + self._storage_controllers = hs.get_storage_controllers() + self._clock = hs.get_clock() + self._notifier = hs.get_notifier() + self._room_member_handler = hs.get_room_member_handler() + + # The current position in the current_state_delta stream + self.pos: Optional[int] = None + + # Guard to ensure we only process deltas one at a time + self._is_processing = False + + if hs.config.worker.run_background_tasks: + self._notifier.add_replication_callback(self.notify_new_event) + + # We kick this off to pick up outstanding work from before the last restart. + self._clock.call_later(0, self.notify_new_event) + + def notify_new_event(self) -> None: + """Called when there may be more deltas to process""" + if self._is_processing: + return + + self._is_processing = True + + async def process() -> None: + try: + await self._unsafe_process() + finally: + self._is_processing = False + + run_as_background_process("room_forgetter.notify_new_event", process) + + async def _unsafe_process(self) -> None: + # If self.pos is None then means we haven't fetched it from DB + if self.pos is None: + self.pos = await self._store.get_room_forgetter_stream_pos() + room_max_stream_ordering = self._store.get_room_max_stream_ordering() + if self.pos > room_max_stream_ordering: + # apparently, we've processed more events than exist in the database! + # this can happen if events are removed with history purge or similar. + logger.warning( + "Event stream ordering appears to have gone backwards (%i -> %i): " + "rewinding room forgetter processor", + self.pos, + room_max_stream_ordering, + ) + self.pos = room_max_stream_ordering + + if not self._hs.config.room.forget_on_leave: + # Update the processing position, so that if the server admin turns the + # feature on at a later date, we don't decide to forget every room that + # has ever been left in the past. + self.pos = self._store.get_room_max_stream_ordering() + await self._store.update_room_forgetter_stream_pos(self.pos) + return + + # Loop round handling deltas until we're up to date + + while True: + # Be sure to read the max stream_ordering *before* checking if there are any outstanding + # deltas, since there is otherwise a chance that we could miss updates which arrive + # after we check the deltas. 
+ room_max_stream_ordering = self._store.get_room_max_stream_ordering() + if self.pos == room_max_stream_ordering: + break + + logger.debug( + "Processing room forgetting %s->%s", self.pos, room_max_stream_ordering + ) + ( + max_pos, + deltas, + ) = await self._storage_controllers.state.get_current_state_deltas( + self.pos, room_max_stream_ordering + ) + + logger.debug("Handling %d state deltas", len(deltas)) + await self._handle_deltas(deltas) + + self.pos = max_pos + + # Expose current event processing position to prometheus + event_processing_positions.labels("room_forgetter").set(max_pos) + + await self._store.update_room_forgetter_stream_pos(max_pos) + + async def _handle_deltas(self, deltas: List[Dict[str, Any]]) -> None: + """Called with the state deltas to process""" + for delta in deltas: + typ = delta["type"] + state_key = delta["state_key"] + room_id = delta["room_id"] + event_id = delta["event_id"] + prev_event_id = delta["prev_event_id"] + + if typ != EventTypes.Member: + continue + + if not self._hs.is_mine_id(state_key): + continue + + change = await self._get_key_change( + prev_event_id, + event_id, + key_name="membership", + public_value=Membership.JOIN, + ) + is_leave = change is MatchChange.now_false + + if is_leave: + try: + await self._room_member_handler.forget( + UserID.from_string(state_key), room_id + ) + except SynapseError as e: + if e.code == 400: + # The user is back in the room. + pass + else: + raise def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]: diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 76e36b8a6d53..e8ff1ad063d6 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -137,6 +137,3 @@ async def _user_left_room(self, target: UserID, room_id: str) -> None: await self._notify_change_client( user_id=target.to_string(), room_id=room_id, change="left" ) - - async def forget(self, target: UserID, room_id: str) -> None: - raise RuntimeError("Cannot forget rooms on workers.") diff --git a/synapse/http/client.py b/synapse/http/client.py index 164abe9fc7df..91fe474f36d9 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -768,7 +768,6 @@ class SimpleHttpClient(BaseHttpClient): request if it were otherwise caught in a blacklist. use_proxy: Whether proxy settings should be discovered and used from conventional environment variables. - connection_pool: The connection pool to use for this client's agent. """ def __init__( @@ -778,7 +777,6 @@ def __init__( ip_whitelist: Optional[IPSet] = None, ip_blacklist: Optional[IPSet] = None, use_proxy: bool = False, - connection_pool: Optional[HTTPConnectionPool] = None, ): super().__init__(hs, treq_args=treq_args) self._ip_whitelist = ip_whitelist @@ -791,12 +789,22 @@ def __init__( self.reactor, self._ip_whitelist, self._ip_blacklist ) + # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to + # do so in batches, so we need to allow the pool to keep lots of idle + # connections around. + pool = HTTPConnectionPool(self.reactor) + # XXX: The justification for using the cache factor here is that larger + # instances will need both more cache and more connections. 
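To make the leave detection in `_handle_deltas` concrete, here is an illustrative reimplementation of the classification it relies on. The real `_get_key_change` in `synapse/handlers/state_deltas.py` fetches both events and compares a content field against `public_value`; this sketch takes the memberships directly:

```python
from enum import Enum, auto
from typing import Optional

class MatchChange(Enum):
    no_change = auto()
    now_true = auto()
    now_false = auto()

def classify(prev_membership: Optional[str], new_membership: Optional[str]) -> MatchChange:
    was_join = prev_membership == "join"
    is_join = new_membership == "join"
    if was_join == is_join:
        return MatchChange.no_change
    return MatchChange.now_true if is_join else MatchChange.now_false

# Kicks, bans and voluntary leaves all classify identically:
assert classify("join", "leave") is MatchChange.now_false
assert classify("join", "ban") is MatchChange.now_false
# A rejected invite never matched "join", so there is nothing to forget:
assert classify("invite", "leave") is MatchChange.no_change
```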
+ # Still, this should probably be a separate dial + pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5) + pool.cachedConnectionTimeout = 2 * 60 + self.agent: IAgent = ProxyAgent( self.reactor, hs.get_reactor(), connectTimeout=15, contextFactory=self.hs.get_http_client_context_factory(), - pool=connection_pool, + pool=pool, use_proxy=use_proxy, ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index e628b484a998..e91ee05e9960 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -143,8 +143,7 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig): ) self.url = url - self.http_client = hs.get_pusher_http_client() - + self.http_client = hs.get_proxied_blacklisted_http_client() self.data_minus_url = {} self.data_minus_url.update(self.data) del self.data_minus_url["url"] diff --git a/synapse/res/providers.json b/synapse/res/providers.json index 7b9958e45464..2dc9fec8e314 100644 --- a/synapse/res/providers.json +++ b/synapse/res/providers.json @@ -11,5 +11,18 @@ "url": "https://publish.twitter.com/oembed" } ] + }, + { + "provider_name": "YouTube Shorts", + "provider_url": "http://www.youtube.com/", + "endpoints": [ + { + "schemes": [ + "https://youtube.com/shorts/*", + "https://*.youtube.com/shorts/*" + ], + "url": "https://www.youtube.com/oembed" + } + ] } ] diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py index 1d409ac2b7b0..abf273af100e 100644 --- a/synapse/rest/admin/experimental_features.py +++ b/synapse/rest/admin/experimental_features.py @@ -33,7 +33,6 @@ class ExperimentalFeature(str, Enum): """ MSC3026 = "msc3026" - MSC2654 = "msc2654" MSC3881 = "msc3881" MSC3967 = "msc3967" diff --git a/synapse/server.py b/synapse/server.py index 75a902d64de4..e597627a6d67 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -27,7 +27,6 @@ from twisted.internet.interfaces import IOpenSSLContextFactory from twisted.internet.tcp import Port -from twisted.web.client import HTTPConnectionPool from twisted.web.iweb import IPolicyForHTTPS from twisted.web.resource import Resource @@ -94,7 +93,11 @@ ) from synapse.handlers.room_batch import RoomBatchHandler from synapse.handlers.room_list import RoomListHandler -from synapse.handlers.room_member import RoomMemberHandler, RoomMemberMasterHandler +from synapse.handlers.room_member import ( + RoomForgetterHandler, + RoomMemberHandler, + RoomMemberMasterHandler, +) from synapse.handlers.room_member_worker import RoomMemberWorkerHandler from synapse.handlers.room_summary import RoomSummaryHandler from synapse.handlers.search import SearchHandler @@ -233,6 +236,7 @@ class HomeServer(metaclass=abc.ABCMeta): "message", "pagination", "profile", + "room_forgetter", "stats", ] @@ -454,26 +458,6 @@ def get_proxied_blacklisted_http_client(self) -> SimpleHttpClient: use_proxy=True, ) - @cache_in_self - def get_pusher_http_client(self) -> SimpleHttpClient: - # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to - # do so in batches, so we need to allow the pool to keep lots of idle - # connections around. - pool = HTTPConnectionPool(self.get_reactor()) - # XXX: The justification for using the cache factor here is that larger - # instances will need both more cache and more connections. 
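Note that the pusher now shares `SimpleHttpClient`'s pool instead of a dedicated one (the `get_pusher_http_client` removal follows below). To make the sizing arithmetic concrete, a small sketch, assuming Synapse's default cache factor of 0.5:

```python
def max_persistent_per_host(global_factor: float) -> int:
    # Mirrors pool.maxPersistentPerHost above: scale with the cache factor,
    # but never keep fewer than 5 idle connections per host.
    return max(int(100 * global_factor), 5)

assert max_persistent_per_host(0.5) == 50   # default-sized instance
assert max_persistent_per_host(0.01) == 5   # floor for tiny cache factors
assert max_persistent_per_host(2.0) == 200  # large instance

CACHED_CONNECTION_TIMEOUT = 2 * 60  # idle connections recycled after 120s
```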
- # Still, this should probably be a separate dial - pool.maxPersistentPerHost = max(int(100 * self.config.caches.global_factor), 5) - pool.cachedConnectionTimeout = 2 * 60 - - return SimpleHttpClient( - self, - ip_whitelist=self.config.server.ip_range_whitelist, - ip_blacklist=self.config.server.ip_range_blacklist, - use_proxy=True, - connection_pool=pool, - ) - @cache_in_self def get_federation_http_client(self) -> MatrixFederationHttpClient: """ @@ -847,6 +831,10 @@ def get_account_handler(self) -> AccountHandler: def get_push_rules_handler(self) -> PushRulesHandler: return PushRulesHandler(self) + @cache_in_self + def get_room_forgetter_handler(self) -> RoomForgetterHandler: + return RoomForgetterHandler(self) + @cache_in_self def get_outbound_redis_connection(self) -> "ConnectionHandler": """ diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index a99aea89261f..ca085ef8000f 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -561,6 +561,50 @@ async def updater(progress: JsonDict, batch_size: int) -> int: updater, oneshot=True ) + def register_background_validate_constraint( + self, update_name: str, constraint_name: str, table: str + ) -> None: + """Helper for store classes to run a background constraint validation. + + This only applies on PostgreSQL. + + To use: + + 1. Use a schema delta file to add a background update. Example: + INSERT INTO background_updates (update_name, progress_json) VALUES + ('validate_my_constraint', '{}'); + + 2. In the Store constructor, call this method. + + Args: + update_name: update_name to register for + constraint_name: name of constraint to validate + table: table the constraint is applied to + """ + + def runner(conn: Connection) -> None: + c = conn.cursor() + + sql = f""" + ALTER TABLE {table} VALIDATE CONSTRAINT {constraint_name}; + """ + logger.debug("[SQL] %s", sql) + c.execute(sql) + + async def updater(progress: JsonDict, batch_size: int) -> int: + assert isinstance( + self.db_pool.engine, engines.PostgresEngine + ), "validate constraint background update registered for non-Postgres database" + + logger.info("Validating constraint %s on %s", constraint_name, table) + await self.db_pool.runWithConnection(runner) + await self._end_background_update(update_name) + return 1 + + self._background_update_handlers[update_name] = _BackgroundUpdateHandler( + updater, oneshot=True + ) + async def create_index_in_background( self, index_name: str, diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 1f5f5eb6f8c7..313cf1a8d086 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -386,13 +386,20 @@ def execute_batch(self, sql: str, args: Iterable[Iterable[Any]]) -> None: self.executemany(sql, args) def execute_values( - self, sql: str, values: Iterable[Iterable[Any]], fetch: bool = True + self, + sql: str, + values: Iterable[Iterable[Any]], + template: Optional[str] = None, + fetch: bool = True, ) -> List[Tuple]: """Corresponds to psycopg2.extras.execute_values. Only available when using postgres. The `fetch` parameter must be set to False if the query does not return rows (e.g. INSERTs). + + The `template` is the snippet to merge to every item in argslist to + compose the query. """ assert isinstance(self.database_engine, PostgresEngine) from psycopg2.extras import execute_values @@ -400,7 +407,9 @@ def execute_values( return self._do_execute( # TODO: is it safe for values to be Iterable[Iterable[Any]] here?
# https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_values says values should be Sequence[Sequence] - lambda the_sql: execute_values(self.txn, the_sql, values, fetch=fetch), + lambda the_sql: execute_values( + self.txn, the_sql, values, template=template, fetch=fetch + ), sql, ) diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index eeccf5db2433..2e98a29fef5a 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -100,7 +100,6 @@ ) from synapse.storage.databases.main.receipts import ReceiptsWorkerStore from synapse.storage.databases.main.stream import StreamWorkerStore -from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.caches.descriptors import cached @@ -289,180 +288,22 @@ def __init__( unique=True, ) - self.db_pool.updates.register_background_update_handler( - "event_push_backfill_thread_id", - self._background_backfill_thread_id, + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_staging_thread_id", + constraint_name="event_push_actions_staging_thread_id", + table="event_push_actions_staging", ) - - # Indexes which will be used to quickly make the thread_id column non-null. - self.db_pool.updates.register_background_index_update( - "event_push_actions_thread_id_null", - index_name="event_push_actions_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_actions_thread_id", + constraint_name="event_push_actions_thread_id", table="event_push_actions", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - self.db_pool.updates.register_background_index_update( - "event_push_summary_thread_id_null", - index_name="event_push_summary_thread_id_null", + self.db_pool.updates.register_background_validate_constraint( + "event_push_summary_thread_id", + constraint_name="event_push_summary_thread_id", table="event_push_summary", - columns=["thread_id"], - where_clause="thread_id IS NULL", ) - # Check ASAP (and then later, every 1s) to see if we have finished - # background updates the event_push_actions and event_push_summary tables. - self._clock.call_later(0.0, self._check_event_push_backfill_thread_id) - self._event_push_backfill_thread_id_done = False - - @wrap_as_background_process("check_event_push_backfill_thread_id") - async def _check_event_push_backfill_thread_id(self) -> None: - """ - Has thread_id finished backfilling? - - If not, we need to just-in-time update it so the queries work. - """ - done = await self.db_pool.updates.has_completed_background_update( - "event_push_backfill_thread_id" - ) - - if done: - self._event_push_backfill_thread_id_done = True - else: - # Reschedule to run. - self._clock.call_later(15.0, self._check_event_push_backfill_thread_id) - - async def _background_backfill_thread_id( - self, progress: JsonDict, batch_size: int - ) -> int: - """ - Fill in the thread_id field for event_push_actions and event_push_summary. - - This is preparatory so that it can be made non-nullable in the future. - - Because all current (null) data is done in an unthreaded manner this - simply assumes it is on the "main" timeline. Since event_push_actions - are periodically cleared it is not possible to correctly re-calculate - the thread_id. 
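The registrations above pair with schema deltas that add each constraint as `NOT VALID` first. A hedged sketch of the two-phase idiom using plain `psycopg2` rather than Synapse's `DatabasePool`; the table and constraint names come from the registrations above, while the `CHECK` body is an assumption consistent with making `thread_id` non-nullable:

```python
import psycopg2

conn = psycopg2.connect("dbname=synapse")  # assumption: a local database

# Phase 1 (schema delta): NOT VALID adds the constraint for new rows only,
# so it needs just a brief metadata lock instead of a full table scan.
with conn, conn.cursor() as cur:
    cur.execute(
        "ALTER TABLE event_push_actions "
        "ADD CONSTRAINT event_push_actions_thread_id "
        "CHECK (thread_id IS NOT NULL) NOT VALID"
    )

# Phase 2 (background update, via register_background_validate_constraint):
# VALIDATE scans existing rows without blocking concurrent writes, unlike
# ALTER COLUMN ... SET NOT NULL.
with conn, conn.cursor() as cur:
    cur.execute(
        "ALTER TABLE event_push_actions "
        "VALIDATE CONSTRAINT event_push_actions_thread_id"
    )
```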
- """ - event_push_actions_done = progress.get("event_push_actions_done", False) - - def add_thread_id_txn( - txn: LoggingTransaction, start_stream_ordering: int - ) -> int: - sql = """ - SELECT stream_ordering - FROM event_push_actions - WHERE - thread_id IS NULL - AND stream_ordering > ? - ORDER BY stream_ordering - LIMIT ? - """ - txn.execute(sql, (start_stream_ordering, batch_size)) - - # No more rows to process. - rows = txn.fetchall() - if not rows: - progress["event_push_actions_done"] = True - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - return 0 - - # Update the thread ID for any of those rows. - max_stream_ordering = rows[-1][0] - - sql = """ - UPDATE event_push_actions - SET thread_id = 'main' - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """ - txn.execute( - sql, - ( - start_stream_ordering, - max_stream_ordering, - ), - ) - - # Update progress. - processed_rows = txn.rowcount - progress["max_event_push_actions_stream_ordering"] = max_stream_ordering - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - def add_thread_id_summary_txn(txn: LoggingTransaction) -> int: - min_user_id = progress.get("max_summary_user_id", "") - min_room_id = progress.get("max_summary_room_id", "") - - # Slightly overcomplicated query for getting the Nth user ID / room - # ID tuple, or the last if there are less than N remaining. - sql = """ - SELECT user_id, room_id FROM ( - SELECT user_id, room_id FROM event_push_summary - WHERE (user_id, room_id) > (?, ?) - AND thread_id IS NULL - ORDER BY user_id, room_id - LIMIT ? - ) AS e - ORDER BY user_id DESC, room_id DESC - LIMIT 1 - """ - - txn.execute(sql, (min_user_id, min_room_id, batch_size)) - row = txn.fetchone() - if not row: - return 0 - - max_user_id, max_room_id = row - - sql = """ - UPDATE event_push_summary - SET thread_id = 'main' - WHERE - (?, ?) < (user_id, room_id) AND (user_id, room_id) <= (?, ?) - AND thread_id IS NULL - """ - txn.execute(sql, (min_user_id, min_room_id, max_user_id, max_room_id)) - processed_rows = txn.rowcount - - progress["max_summary_user_id"] = max_user_id - progress["max_summary_room_id"] = max_room_id - self.db_pool.updates._background_update_progress_txn( - txn, "event_push_backfill_thread_id", progress - ) - - return processed_rows - - # First update the event_push_actions table, then the event_push_summary table. - # - # Note that the event_push_actions_staging table is ignored since it is - # assumed that items in that table will only exist for a short period of - # time. - if not event_push_actions_done: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_txn, - progress.get("max_event_push_actions_stream_ordering", 0), - ) - else: - result = await self.db_pool.runInteraction( - "event_push_backfill_thread_id", - add_thread_id_summary_txn, - ) - - # Only done after the event_push_summary table is done. - if not result: - await self.db_pool.updates._end_background_update( - "event_push_backfill_thread_id" - ) - - return result - async def get_unread_counts_by_room_for_user(self, user_id: str) -> Dict[str, int]: """Get the notification count by room for a user. Only considers notifications, not highlight or unread counts, and threads are currently aggregated under their room. 
@@ -711,25 +552,6 @@ def _get_thread(thread_id: str) -> NotifCounts: (ReceiptTypes.READ, ReceiptTypes.READ_PRIVATE), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # First we pull the counts from the summary table. # # We check that `last_receipt_stream_ordering` matches the stream ordering of the @@ -1545,25 +1367,6 @@ def _handle_new_receipts_for_notifs_txn(self, txn: LoggingTransaction) -> bool: (room_id, user_id, stream_ordering, *thread_args), ) - # First ensure that the existing rows have an updated thread_id field. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - (MAIN_TIMELINE, room_id, user_id), - ) - # Fetch the notification counts between the stream ordering of the # latest receipt and what was previously summarised. unread_counts = self._get_notif_unread_count_for_user_room( @@ -1698,19 +1501,6 @@ def _rotate_notifs_before_txn( rotate_to_stream_ordering: The new maximum event stream ordering to summarise. """ - # Ensure that any new actions have an updated thread_id. - if not self._event_push_backfill_thread_id_done: - txn.execute( - """ - UPDATE event_push_actions - SET thread_id = ? - WHERE ? < stream_ordering AND stream_ordering <= ? AND thread_id IS NULL - """, - (MAIN_TIMELINE, old_rotate_stream_ordering, rotate_to_stream_ordering), - ) - - # XXX Do we need to update summaries here too? - # Calculate the new counts that should be upserted into event_push_summary sql = """ SELECT user_id, room_id, thread_id, @@ -1773,20 +1563,6 @@ def _rotate_notifs_before_txn( logger.info("Rotating notifications, handling %d rows", len(summaries)) - # Ensure that any updated threads have the proper thread_id. - if not self._event_push_backfill_thread_id_done: - txn.execute_batch( - """ - UPDATE event_push_summary - SET thread_id = ? - WHERE room_id = ? AND user_id = ? AND thread_id is NULL - """, - [ - (MAIN_TIMELINE, room_id, user_id) - for user_id, room_id, _ in summaries - ], - ) - self.db_pool.simple_upsert_many_txn( txn, table="event_push_summary", @@ -1836,6 +1612,15 @@ def remove_old_push_actions_that_have_rotated_txn( # deletes. batch_size = self._rotate_count + if isinstance(self.database_engine, PostgresEngine): + # Temporarily disable sequential scans in this transaction. We + # need to do this as the postgres statistics don't take into + # account the `highlight = 0` part when estimating the + # distribution of `stream_ordering`. I.e. since we keep old + # highlight rows the query planner thinks there are way more old + # rows to delete than there actually are. 
+ txn.execute("SET LOCAL enable_seqscan=off") + txn.execute( """ SELECT stream_ordering FROM event_push_actions diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index daad58291a8b..e068f27a1079 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -82,7 +82,7 @@ class EventIdMembership: membership: str -class RoomMemberWorkerStore(EventsWorkerStore): +class RoomMemberWorkerStore(EventsWorkerStore, CacheInvalidationWorkerStore): def __init__( self, database: DatabasePool, @@ -1372,6 +1372,50 @@ def _is_local_host_in_room_ignoring_users_txn( _is_local_host_in_room_ignoring_users_txn, ) + async def forget(self, user_id: str, room_id: str) -> None: + """Indicate that user_id wishes to discard history for room_id.""" + + def f(txn: LoggingTransaction) -> None: + self.db_pool.simple_update_txn( + txn, + table="room_memberships", + keyvalues={"user_id": user_id, "room_id": room_id}, + updatevalues={"forgotten": 1}, + ) + + self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) + self._invalidate_cache_and_stream( + txn, self.get_forgotten_rooms_for_user, (user_id,) + ) + + await self.db_pool.runInteraction("forget_membership", f) + + async def get_room_forgetter_stream_pos(self) -> int: + """Get the stream position of the background process to forget rooms when left + by users. + """ + return await self.db_pool.simple_select_one_onecol( + table="room_forgetter_stream_pos", + keyvalues={}, + retcol="stream_id", + desc="room_forgetter_stream_pos", + ) + + async def update_room_forgetter_stream_pos(self, stream_id: int) -> None: + """Update the stream position of the background process to forget rooms when + left by users. + + Must only be used by the worker running the background process. + """ + assert self.hs.config.worker.run_background_tasks + + await self.db_pool.simple_update_one( + table="room_forgetter_stream_pos", + keyvalues={}, + updatevalues={"stream_id": stream_id}, + desc="room_forgetter_stream_pos", + ) + class RoomMemberBackgroundUpdateStore(SQLBaseStore): def __init__( @@ -1553,29 +1597,6 @@ def __init__( ): super().__init__(database, db_conn, hs) - async def forget(self, user_id: str, room_id: str) -> None: - """Indicate that user_id wishes to discard history for room_id.""" - - def f(txn: LoggingTransaction) -> None: - sql = ( - "UPDATE" - " room_memberships" - " SET" - " forgotten = 1" - " WHERE" - " user_id = ?" - " AND" - " room_id = ?" - ) - txn.execute(sql, (user_id, room_id)) - - self._invalidate_cache_and_stream(txn, self.did_forget, (user_id, room_id)) - self._invalidate_cache_and_stream( - txn, self.get_forgotten_rooms_for_user, (user_id,) - ) - - await self.db_pool.runInteraction("forget_membership", f) - def extract_heroes_from_room_summary( details: Mapping[str, MemberSummary], me: str diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 5d65faed164a..b7d58978de2b 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -27,6 +27,8 @@ cast, ) +import attr + try: # Figure out if ICU support is available for searching users. import icu @@ -66,6 +68,19 @@ TEMP_TABLE = "_temp_populate_user_directory" +@attr.s(auto_attribs=True, frozen=True) +class _UserDirProfile: + """Helper type for the user directory code for an entry to be inserted into + the directory. 
+ """ + + user_id: str + + # If the display name or avatar URL are unexpected types, replace with None + display_name: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) + avatar_url: Optional[str] = attr.ib(default=None, converter=non_null_str_or_none) + + class UserDirectoryBackgroundUpdateStore(StateDeltasStore): # How many records do we calculate before sending it to # add_users_who_share_private_rooms? @@ -381,25 +396,65 @@ def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]: % (len(users_to_work_on), progress["remaining"]) ) - for user_id in users_to_work_on: - if await self.should_include_local_user_in_dir(user_id): - profile = await self.get_profileinfo(get_localpart_from_id(user_id)) # type: ignore[attr-defined] - await self.update_profile_in_user_dir( - user_id, profile.display_name, profile.avatar_url - ) - - # We've finished processing a user. Delete it from the table. - await self.db_pool.simple_delete_one( - TEMP_TABLE + "_users", {"user_id": user_id} - ) - # Update the remaining counter. - progress["remaining"] -= 1 - await self.db_pool.runInteraction( - "populate_user_directory", - self.db_pool.updates._background_update_progress_txn, - "populate_user_directory_process_users", - progress, + # First filter down to users we want to insert into the user directory. + users_to_insert = [ + user_id + for user_id in users_to_work_on + if await self.should_include_local_user_in_dir(user_id) + ] + + # Next fetch their profiles. Note that the `user_id` here is the + # *localpart*, and that not all users have profiles. + profile_rows = await self.db_pool.simple_select_many_batch( + table="profiles", + column="user_id", + iterable=[get_localpart_from_id(u) for u in users_to_insert], + retcols=( + "user_id", + "displayname", + "avatar_url", + ), + keyvalues={}, + desc="populate_user_directory_process_users_get_profiles", + ) + profiles = { + f"@{row['user_id']}:{self.server_name}": _UserDirProfile( + f"@{row['user_id']}:{self.server_name}", + row["displayname"], + row["avatar_url"], ) + for row in profile_rows + } + + profiles_to_insert = [ + profiles.get(user_id) or _UserDirProfile(user_id) + for user_id in users_to_insert + ] + + # Actually insert the users with their profiles into the directory. + await self.db_pool.runInteraction( + "populate_user_directory_process_users_insertion", + self._update_profiles_in_user_dir_txn, + profiles_to_insert, + ) + + # We've finished processing the users. Delete it from the table. + await self.db_pool.simple_delete_many( + table=TEMP_TABLE + "_users", + column="user_id", + iterable=users_to_work_on, + keyvalues={}, + desc="populate_user_directory_process_users_delete", + ) + + # Update the remaining counter. + progress["remaining"] -= len(users_to_work_on) + await self.db_pool.runInteraction( + "populate_user_directory", + self.db_pool.updates._background_update_progress_txn, + "populate_user_directory_process_users", + progress, + ) return len(users_to_work_on) @@ -584,72 +639,102 @@ async def update_profile_in_user_dir( Update or add a user's profile in the user directory. If the user is remote, the profile will be marked as not stale. """ - # If the display name or avatar URL are unexpected types, replace with None. 
- display_name = non_null_str_or_none(display_name) - avatar_url = non_null_str_or_none(avatar_url) + await self.db_pool.runInteraction( + "update_profiles_in_user_dir", + self._update_profiles_in_user_dir_txn, + [_UserDirProfile(user_id, display_name, avatar_url)], + ) + + def _update_profiles_in_user_dir_txn( + self, + txn: LoggingTransaction, + profiles: Sequence[_UserDirProfile], + ) -> None: + self.db_pool.simple_upsert_many_txn( + txn, + table="user_directory", + key_names=("user_id",), + key_values=[(p.user_id,) for p in profiles], + value_names=("display_name", "avatar_url"), + value_values=[ + ( + p.display_name, + p.avatar_url, + ) + for p in profiles + ], + ) - def _update_profile_in_user_dir_txn(txn: LoggingTransaction) -> None: - self.db_pool.simple_upsert_txn( + # Remote users: Make sure the profile is not marked as stale anymore. + remote_users = [ + p.user_id for p in profiles if not self.hs.is_mine_id(p.user_id) + ] + if remote_users: + self.db_pool.simple_delete_many_txn( txn, - table="user_directory", - keyvalues={"user_id": user_id}, - values={"display_name": display_name, "avatar_url": avatar_url}, + table="user_directory_stale_remote_users", + column="user_id", + values=remote_users, + keyvalues={}, ) - if not self.hs.is_mine_id(user_id): - # Remote users: Make sure the profile is not marked as stale anymore. - self.db_pool.simple_delete_txn( - txn, - table="user_directory_stale_remote_users", - keyvalues={"user_id": user_id}, + if isinstance(self.database_engine, PostgresEngine): + # We weight the localpart most highly, then display name and finally + # server name + template = """ + ( + %s, + setweight(to_tsvector('simple', %s), 'A') + || setweight(to_tsvector('simple', %s), 'D') + || setweight(to_tsvector('simple', COALESCE(%s, '')), 'B') ) + """ - # The display name that goes into the database index. - index_display_name = display_name - if index_display_name is not None: - index_display_name = _filter_text_for_index(index_display_name) - - if isinstance(self.database_engine, PostgresEngine): - # We weight the localpart most highly, then display name and finally - # server name - sql = """ - INSERT INTO user_directory_search(user_id, vector) - VALUES (?, - setweight(to_tsvector('simple', ?), 'A') - || setweight(to_tsvector('simple', ?), 'D') - || setweight(to_tsvector('simple', COALESCE(?, '')), 'B') - ) ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector - """ - txn.execute( - sql, + sql = """ + INSERT INTO user_directory_search(user_id, vector) + VALUES ? ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector + """ + txn.execute_values( + sql, + [ ( - user_id, - get_localpart_from_id(user_id), - get_domain_from_id(user_id), - index_display_name, - ), - ) - elif isinstance(self.database_engine, Sqlite3Engine): - value = ( - "%s %s" % (user_id, index_display_name) - if index_display_name - else user_id - ) - self.db_pool.simple_upsert_txn( - txn, - table="user_directory_search", - keyvalues={"user_id": user_id}, - values={"value": value}, - ) - else: - # This should be unreachable. 
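This is where the new `template` parameter pays off: it lets a single batched `INSERT` wrap each row of parameters in per-row SQL, here the weighted `to_tsvector` expression. The same call made with `psycopg2` directly, on an illustrative connection and data set:

```python
import psycopg2
from psycopg2.extras import execute_values

conn = psycopg2.connect("dbname=synapse")  # assumption: a local database

template = """
    (
        %s,
        setweight(to_tsvector('simple', %s), 'A')
        || setweight(to_tsvector('simple', %s), 'D')
        || setweight(to_tsvector('simple', COALESCE(%s, '')), 'B')
    )
"""
sql = """
    INSERT INTO user_directory_search(user_id, vector)
    VALUES %s ON CONFLICT (user_id) DO UPDATE SET vector=EXCLUDED.vector
"""
rows = [
    ("@al:example.com", "al", "example.com", "Al"),
    ("@bo:example.com", "bo", "example.com", None),  # no display name
]
with conn, conn.cursor() as cur:
    # Each row is spliced into one copy of `template`; a single round trip
    # replaces the per-user INSERT the old code issued.
    execute_values(cur, sql, rows, template=template, fetch=False)
```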
-                raise Exception("Unrecognized database engine")
+                        p.user_id,
+                        get_localpart_from_id(p.user_id),
+                        get_domain_from_id(p.user_id),
+                        _filter_text_for_index(p.display_name)
+                        if p.display_name
+                        else None,
+                    )
+                    for p in profiles
+                ],
+                template=template,
+                fetch=False,
+            )
+        elif isinstance(self.database_engine, Sqlite3Engine):
+            values = []
+            for p in profiles:
+                if p.display_name is not None:
+                    index_display_name = _filter_text_for_index(p.display_name)
+                    value = f"{p.user_id} {index_display_name}"
+                else:
+                    value = p.user_id
 
-        txn.call_after(self.get_user_in_directory.invalidate, (user_id,))
+                values.append((value,))
 
-        await self.db_pool.runInteraction(
-            "update_profile_in_user_dir", _update_profile_in_user_dir_txn
-        )
+            self.db_pool.simple_upsert_many_txn(
+                txn,
+                table="user_directory_search",
+                key_names=("user_id",),
+                key_values=[(p.user_id,) for p in profiles],
+                value_names=("value",),
+                value_values=values,
+            )
+        else:
+            # This should be unreachable.
+            raise Exception("Unrecognized database engine")
+
+        for p in profiles:
+            txn.call_after(self.get_user_in_directory.invalidate, (p.user_id,))
 
     async def add_users_who_share_private_room(
         self, room_id: str, user_id_tuples: Iterable[Tuple[str, str]]
diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py
index 1a8f1c0c123b..ed068e646d24 100644
--- a/synapse/storage/schema/__init__.py
+++ b/synapse/storage/schema/__init__.py
@@ -107,6 +107,12 @@
 SCHEMA_COMPAT_VERSION = (
+    # Queries against `event_stream_ordering` columns in membership tables must
+    # be disambiguated.
+    #
+    # The thread_id column must be written to with non-null values for the
+    # event_push_actions, event_push_actions_staging, and event_push_summary tables.
+    #
     # insertions to the column `full_user_id` of tables profiles and user_filters can no
     # longer be null
     75
diff --git a/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql
new file mode 100644
index 000000000000..be4b57d86f7a
--- /dev/null
+++ b/synapse/storage/schema/main/delta/76/04_add_room_forgetter.sql
@@ -0,0 +1,24 @@
+/* Copyright 2023 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+CREATE TABLE room_forgetter_stream_pos (
+    Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE,  -- Makes sure this table only has one row.
+ stream_id BIGINT NOT NULL, + CHECK (Lock='X') +); + +INSERT INTO room_forgetter_stream_pos ( + stream_id +) SELECT COALESCE(MAX(stream_ordering), 0) from events; diff --git a/synapse/storage/schema/main/delta/76/04thread_notifications_backfill.sql b/synapse/storage/schema/main/delta/76/04thread_notifications_backfill.sql new file mode 100644 index 000000000000..ce6f9ff93748 --- /dev/null +++ b/synapse/storage/schema/main/delta/76/04thread_notifications_backfill.sql @@ -0,0 +1,28 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Force the background updates from 06thread_notifications.sql to run in the +-- foreground as code will now require those to be "done". + +DELETE FROM background_updates WHERE update_name = 'event_push_backfill_thread_id'; + +-- Overwrite any null thread_id values. +UPDATE event_push_actions_staging SET thread_id = 'main' WHERE thread_id IS NULL; +UPDATE event_push_actions SET thread_id = 'main' WHERE thread_id IS NULL; +UPDATE event_push_summary SET thread_id = 'main' WHERE thread_id IS NULL; + +-- Drop the background updates to calculate the indexes used to find null thread_ids. +DELETE FROM background_updates WHERE update_name = 'event_push_actions_thread_id_null'; +DELETE FROM background_updates WHERE update_name = 'event_push_summary_thread_id_null'; diff --git a/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.postgres b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.postgres new file mode 100644 index 000000000000..40936def6faa --- /dev/null +++ b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.postgres @@ -0,0 +1,37 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- The thread_id columns can now be made non-nullable, this is done by using a +-- constraint (and not altering the column) to avoid taking out a full table lock. +-- +-- We initially add an invalid constraint which guards against new data (this +-- doesn't lock the table). 
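The `room_forgetter_stream_pos` table above uses the one-row-table idiom: a `Lock CHAR(1)` column that is `UNIQUE`, defaults to `'X'`, and is `CHECK`ed to equal `'X'`, so a second row can never be inserted and the stored stream position can only ever be `UPDATE`d. A runnable illustration with sqlite3 (the same constraints work on Postgres):

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE room_forgetter_stream_pos (
        Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE CHECK (Lock = 'X'),
        stream_id BIGINT NOT NULL
    );
    -- Seed the single row, as the migration does with MAX(stream_ordering).
    INSERT INTO room_forgetter_stream_pos (stream_id) VALUES (42);
    """
)

# A second row must either duplicate 'X' (UNIQUE fails) or use a different
# value (CHECK fails), so the table is structurally limited to one row.
try:
    conn.execute("INSERT INTO room_forgetter_stream_pos (stream_id) VALUES (43)")
except sqlite3.IntegrityError as e:
    print("rejected:", e)

# Updating the position is the only way forward.
conn.execute("UPDATE room_forgetter_stream_pos SET stream_id = 43")
print(conn.execute("SELECT stream_id FROM room_forgetter_stream_pos").fetchone())
```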
+ALTER TABLE event_push_actions_staging
+    ADD CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL) NOT VALID;
+ALTER TABLE event_push_actions
+    ADD CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL) NOT VALID;
+ALTER TABLE event_push_summary
+    ADD CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL) NOT VALID;
+
+-- We then validate the constraints, which doesn't need to worry about new data.
+-- This is done by the background updates registered below; validation only needs
+-- a SHARE UPDATE EXCLUSIVE lock but can still take a while to complete.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (7605, 'event_push_actions_staging_thread_id', '{}'),
+  (7605, 'event_push_actions_thread_id', '{}'),
+  (7605, 'event_push_summary_thread_id', '{}');
+
+-- Drop the indexes used to find null thread_ids.
+DROP INDEX IF EXISTS event_push_actions_thread_id_null;
+DROP INDEX IF EXISTS event_push_summary_thread_id_null;
diff --git a/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.sqlite b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.sqlite
new file mode 100644
index 000000000000..e9372b6cf9e1
--- /dev/null
+++ b/synapse/storage/schema/main/delta/76/05thread_notifications_not_null.sql.sqlite
@@ -0,0 +1,102 @@
+/* Copyright 2022 The Matrix.org Foundation C.I.C
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- The thread_id columns can now be made non-nullable.
+--
+-- SQLite doesn't support modifying columns of an existing table, so it must
+-- be recreated.
+
+-- Create the new tables.
+CREATE TABLE event_push_actions_staging_new (
+    event_id TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    actions TEXT NOT NULL,
+    notif SMALLINT NOT NULL,
+    highlight SMALLINT NOT NULL,
+    unread SMALLINT,
+    thread_id TEXT,
+    inserted_ts BIGINT,
+    CONSTRAINT event_push_actions_staging_thread_id CHECK (thread_id IS NOT NULL)
+);
+
+CREATE TABLE event_push_actions_new (
+    room_id TEXT NOT NULL,
+    event_id TEXT NOT NULL,
+    user_id TEXT NOT NULL,
+    profile_tag VARCHAR(32),
+    actions TEXT NOT NULL,
+    topological_ordering BIGINT,
+    stream_ordering BIGINT,
+    notif SMALLINT,
+    highlight SMALLINT,
+    unread SMALLINT,
+    thread_id TEXT,
+    CONSTRAINT event_id_user_id_profile_tag_uniqueness UNIQUE (room_id, event_id, user_id, profile_tag),
+    CONSTRAINT event_push_actions_thread_id CHECK (thread_id IS NOT NULL)
+);
+
+CREATE TABLE event_push_summary_new (
+    user_id TEXT NOT NULL,
+    room_id TEXT NOT NULL,
+    notif_count BIGINT NOT NULL,
+    stream_ordering BIGINT NOT NULL,
+    unread_count BIGINT,
+    last_receipt_stream_ordering BIGINT,
+    thread_id TEXT,
+    CONSTRAINT event_push_summary_thread_id CHECK (thread_id IS NOT NULL)
+);
+
+-- Copy the data.
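Stepping back from the SQLite file for a moment: the Postgres migration above is a two-phase pattern. `ADD CONSTRAINT ... NOT VALID` takes only a brief lock and checks nothing retroactively; the registered background updates later run `VALIDATE CONSTRAINT`, which scans existing rows under a `SHARE UPDATE EXCLUSIVE` lock without blocking writes. A sketch of both phases, assuming a reachable Postgres database (the DSN is made up, and in Synapse phase two is driven by a background update rather than run inline):

```python
import psycopg2

conn = psycopg2.connect("dbname=synapse")  # hypothetical DSN
conn.autocommit = True
with conn.cursor() as cur:
    # Phase 1: NOT VALID means only rows written from now on are checked,
    # so no full-table scan is needed and the lock is held only briefly.
    cur.execute(
        """
        ALTER TABLE event_push_summary
            ADD CONSTRAINT event_push_summary_thread_id
            CHECK (thread_id IS NOT NULL) NOT VALID
        """
    )
    # Phase 2 (later, from a background update): scan existing rows under a
    # SHARE UPDATE EXCLUSIVE lock, which does not block concurrent writes.
    cur.execute(
        "ALTER TABLE event_push_summary "
        "VALIDATE CONSTRAINT event_push_summary_thread_id"
    )
```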
+INSERT INTO event_push_actions_staging_new (event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts) + SELECT event_id, user_id, actions, notif, highlight, unread, thread_id, inserted_ts + FROM event_push_actions_staging; + +INSERT INTO event_push_actions_new (room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id) + SELECT room_id, event_id, user_id, profile_tag, actions, topological_ordering, stream_ordering, notif, highlight, unread, thread_id + FROM event_push_actions; + +INSERT INTO event_push_summary_new (user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id) + SELECT user_id, room_id, notif_count, stream_ordering, unread_count, last_receipt_stream_ordering, thread_id + FROM event_push_summary; + +-- Drop the old tables. +DROP TABLE event_push_actions_staging; +DROP TABLE event_push_actions; +DROP TABLE event_push_summary; + +-- Rename the tables. +ALTER TABLE event_push_actions_staging_new RENAME TO event_push_actions_staging; +ALTER TABLE event_push_actions_new RENAME TO event_push_actions; +ALTER TABLE event_push_summary_new RENAME TO event_push_summary; + +-- Recreate the indexes. +CREATE INDEX event_push_actions_staging_id ON event_push_actions_staging(event_id); + +CREATE INDEX event_push_actions_highlights_index ON event_push_actions (user_id, room_id, topological_ordering, stream_ordering); +CREATE INDEX event_push_actions_rm_tokens on event_push_actions( user_id, room_id, topological_ordering, stream_ordering ); +CREATE INDEX event_push_actions_room_id_user_id on event_push_actions(room_id, user_id); +CREATE INDEX event_push_actions_stream_ordering on event_push_actions( stream_ordering, user_id ); +CREATE INDEX event_push_actions_u_highlight ON event_push_actions (user_id, stream_ordering); + +CREATE UNIQUE INDEX event_push_summary_unique_index2 ON event_push_summary (user_id, room_id, thread_id) ; + +-- Recreate some indexes in the background, by re-running the background updates +-- from 72/02event_push_actions_index.sql and 72/06thread_notifications.sql. 
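The SQLite file above is the standard recreate-and-swap recipe, since SQLite cannot add a `CHECK` constraint to an existing table: create the replacement table, copy the rows, drop the original, rename, then recreate the indexes (the `INSERT INTO background_updates` statements that follow re-register the two index builds that are safe to redo lazily). A runnable miniature of the recipe with a toy table rather than the real push-action schema:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE t (user_id TEXT NOT NULL, thread_id TEXT);
    INSERT INTO t VALUES ('@a:x', 'main');

    -- 1. Create the replacement table carrying the new constraint.
    CREATE TABLE t_new (
        user_id TEXT NOT NULL,
        thread_id TEXT,
        CONSTRAINT t_thread_id CHECK (thread_id IS NOT NULL)
    );
    -- 2. Copy the data.  3. Drop the old table.  4. Rename the new one.
    INSERT INTO t_new SELECT user_id, thread_id FROM t;
    DROP TABLE t;
    ALTER TABLE t_new RENAME TO t;
    -- 5. Recreate the indexes (DROP TABLE discarded the old ones).
    CREATE INDEX t_user ON t (user_id);
    """
)
print(conn.execute("SELECT * FROM t").fetchall())  # [('@a:x', 'main')]
```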
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (7403, 'event_push_summary_unique_index2', '{}')
+  ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}';
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (7403, 'event_push_actions_stream_highlight_index', '{}')
+  ON CONFLICT (update_name) DO UPDATE SET progress_json = '{}';
diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py
index 6a38893b688a..a444d822cd4a 100644
--- a/tests/handlers/test_room_member.py
+++ b/tests/handlers/test_room_member.py
@@ -333,6 +333,17 @@ def test_leave_and_forget(self) -> None:
             self.get_success(self.store.is_locally_forgotten_room(self.room_id))
         )
 
+    @override_config({"forget_rooms_on_leave": True})
+    def test_leave_and_auto_forget(self) -> None:
+        """Tests the `forget_rooms_on_leave` config option."""
+        self.helper.join(self.room_id, user=self.bob, tok=self.bob_token)
+
+        # Alice is not the last room member to leave; the room should still be
+        # forgotten for her automatically.
+        self.helper.leave(self.room_id, user=self.alice, tok=self.alice_token)
+        self.assertTrue(
+            self.get_success(self.store.did_forget(self.alice, self.room_id))
+        )
+
     def test_leave_and_forget_last_user(self) -> None:
         """Tests that forgetting a room succeeds when the last user has left the room."""
diff --git a/tests/push/test_http.py b/tests/push/test_http.py
index 4f811bb9c0f5..54f558742dfe 100644
--- a/tests/push/test_http.py
+++ b/tests/push/test_http.py
@@ -52,7 +52,7 @@ def post_json_get_json(url: str, body: JsonDict) -> Deferred:
 
         m.post_json_get_json = post_json_get_json
 
-        hs = self.setup_test_homeserver(pusher_http_client=m)
+        hs = self.setup_test_homeserver(proxied_blacklisted_http_client=m)
 
         return hs
 
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index b9bb1a649754..dcb3e6669bb9 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -93,7 +93,7 @@ def test_send_push_single_worker(self) -> None:
         self.make_worker_hs(
             "synapse.app.generic_worker",
             {"worker_name": "pusher1", "pusher_instances": ["pusher1"]},
-            pusher_http_client=http_client_mock,
+            proxied_blacklisted_http_client=http_client_mock,
         )
 
         event_id = self._create_pusher_and_send_msg("user")
@@ -126,7 +126,7 @@ def test_send_push_multiple_workers(self) -> None:
                 "worker_name": "pusher1",
                 "pusher_instances": ["pusher1", "pusher2"],
             },
-            pusher_http_client=http_client_mock1,
+            proxied_blacklisted_http_client=http_client_mock1,
         )
 
         http_client_mock2 = Mock(spec_set=["post_json_get_json"])
@@ -140,7 +140,7 @@ def test_send_push_multiple_workers(self) -> None:
                 "worker_name": "pusher2",
                 "pusher_instances": ["pusher1", "pusher2"],
             },
-            pusher_http_client=http_client_mock2,
+            proxied_blacklisted_http_client=http_client_mock2,
        )
 
         # We choose a user name that we know should go to pusher1.
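The pusher tests above build their fake HTTP client with `Mock(spec_set=["post_json_get_json"])`, so renaming the injected attribute (here, from `pusher_http_client` to `proxied_blacklisted_http_client`) cannot silently leave a test exercising a method the real client does not expose. A standalone illustration of the `spec_set` guard:

```python
from unittest.mock import Mock

# Only the listed attribute exists on the mock; everything else fails fast.
m = Mock(spec_set=["post_json_get_json"])

m.post_json_get_json("https://push.example.com/_matrix/push/v1/notify", {})
m.post_json_get_json.assert_called_once()

try:
    m.get_json("https://push.example.com/whoami")  # not in spec_set
except AttributeError as e:
    print("blocked:", e)  # Mock object has no attribute 'get_json'
```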
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 645a00b4b124..695e84357ad2 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -399,7 +399,7 @@ def test_enable_and_disable(self) -> None: "PUT", url, content={ - "features": {"msc3026": True, "msc2654": True}, + "features": {"msc3026": True, "msc3881": True}, }, access_token=self.admin_user_tok, ) @@ -420,7 +420,7 @@ def test_enable_and_disable(self) -> None: ) self.assertEqual( True, - channel.json_body["features"]["msc2654"], + channel.json_body["features"]["msc3881"], ) # test disabling a feature works @@ -448,10 +448,6 @@ def test_enable_and_disable(self) -> None: ) self.assertEqual( True, - channel.json_body["features"]["msc2654"], - ) - self.assertEqual( - False, channel.json_body["features"]["msc3881"], ) self.assertEqual(
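The assertions above show the shape of the experimental-features admin API: a `PUT` with a `{"features": {...}}` body and a `GET` that returns the flags under `"features"`. A hedged sketch of driving it with `requests`; the URL path is taken from the v1.83 admin docs linked in the changelog, and the server, user, and token values are invented:

```python
import requests

BASE = "https://synapse.example.com"   # hypothetical homeserver
TOKEN = "syt_admin_access_token"       # hypothetical admin access token
USER = "@user:example.com"
URL = f"{BASE}/_synapse/admin/v1/experimental_features/{USER}"
HEADERS = {"Authorization": f"Bearer {TOKEN}"}

# Enable two per-user feature flags, mirroring the test's request body.
resp = requests.put(
    URL,
    headers=HEADERS,
    json={"features": {"msc3026": True, "msc3881": True}},
    timeout=10,
)
resp.raise_for_status()

# Read the flags back; the response carries a {"features": {...}} object.
print(requests.get(URL, headers=HEADERS, timeout=10).json())
```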