diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh index 3398193ee565..e536a9db8b33 100755 --- a/.ci/scripts/prepare_old_deps.sh +++ b/.ci/scripts/prepare_old_deps.sh @@ -35,9 +35,9 @@ sed -i \ # compatible (as far the package metadata declares, anyway); pip's package resolver # is more lax. # -# Rather than `poetry install --no-dev`, we drop all dev dependencies from the -# toml file. This means we don't have to ensure compatibility between old deps and -# dev tools. +# Rather than `poetry install --no-dev`, we drop all dev dependencies and the dev-docs +# group from the toml file. This means we don't have to ensure compatibility between +# old deps and dev tools. pip install toml wheel @@ -47,6 +47,7 @@ with open('pyproject.toml', 'r') as f: data = toml.loads(f.read()) del data['tool']['poetry']['dev-dependencies'] +del data['tool']['poetry']['group']['dev-docs'] with open('pyproject.toml', 'w') as f: toml.dump(data, f) diff --git a/.ci/scripts/setup_complement_prerequisites.sh b/.ci/scripts/setup_complement_prerequisites.sh index 3778478da644..47a3ff8e6966 100755 --- a/.ci/scripts/setup_complement_prerequisites.sh +++ b/.ci/scripts/setup_complement_prerequisites.sh @@ -9,16 +9,6 @@ set -eu alias block='{ set +x; } 2>/dev/null; func() { echo "::group::$*"; set -x; }; func' alias endblock='{ set +x; } 2>/dev/null; func() { echo "::endgroup::"; set -x; }; func' -block Set Go Version - # The path is set via a file given by $GITHUB_PATH. We need both Go 1.17 and GOPATH on the path to run Complement. - # See https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#adding-a-system-path - - # Add Go 1.17 to the PATH: see https://github.com/actions/virtual-environments/blob/main/images/linux/Ubuntu2004-Readme.md#environment-variables-2 - echo "$GOROOT_1_17_X64/bin" >> $GITHUB_PATH - # Add the Go path to the PATH: We need this so we can call gotestfmt - echo "~/go/bin" >> $GITHUB_PATH -endblock - block Install Complement Dependencies sudo apt-get -qq update && sudo apt-get install -qqy libolm3 libolm-dev go install -v github.com/gotesttools/gotestfmt/v2/cmd/gotestfmt@latest diff --git a/.gitignore b/.gitignore index 6937de88bcd1..9d037f28e758 100644 --- a/.gitignore +++ b/.gitignore @@ -53,6 +53,7 @@ __pycache__/ /coverage.* /dist/ /docs/build/ +/dev-docs/_build/ /htmlcov /pip-wheel-metadata/ @@ -61,7 +62,7 @@ book/ # complement /complement-* -/master.tar.gz +/main.tar.gz # rust /target/ diff --git a/CHANGES.md b/CHANGES.md index 5f2a4a41eb42..b2cc138ee302 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,181 @@ +Synapse 1.82.0 (2023-04-25) +=========================== + +No significant changes since 1.82.0rc1. + + +Synapse 1.82.0rc1 (2023-04-18) +============================== + +Features +-------- + +- Allow loading the `/directory/room/{roomAlias}` endpoint on workers. ([\#15333](https://github.com/matrix-org/synapse/issues/15333)) +- Add some validation to `instance_map` configuration loading. ([\#15431](https://github.com/matrix-org/synapse/issues/15431)) +- Allow loading the `/capabilities` endpoint on workers. ([\#15436](https://github.com/matrix-org/synapse/issues/15436)) + + +Bugfixes +-------- + +- Delete server-side backup keys when deactivating an account. ([\#15181](https://github.com/matrix-org/synapse/issues/15181)) +- Fix and document untold assumption that `on_logged_out` module hooks will be called before the deletion of pushers. 
([\#15410](https://github.com/matrix-org/synapse/issues/15410)) +- Improve robustness when handling a perspective key response by deduplicating received server keys. ([\#15423](https://github.com/matrix-org/synapse/issues/15423)) +- Synapse now correctly fails to start if the config option `app_service_config_files` is not a list. ([\#15425](https://github.com/matrix-org/synapse/issues/15425)) +- Disable loading `RefreshTokenServlet` (`/_matrix/client/(r0|v3|unstable)/refresh`) on workers. ([\#15428](https://github.com/matrix-org/synapse/issues/15428)) + + +Improved Documentation +---------------------- + +- Note that the `delete_stale_devices_after` background job always runs on the main process. ([\#15452](https://github.com/matrix-org/synapse/issues/15452)) + + +Deprecations and Removals +------------------------- + +- Remove the broken, unspecced registration fallback. Note that the *login* fallback is unaffected by this change. ([\#15405](https://github.com/matrix-org/synapse/issues/15405)) + + +Internal Changes +---------------- + +- Bump black from 23.1.0 to 23.3.0. ([\#15372](https://github.com/matrix-org/synapse/issues/15372)) +- Bump pyopenssl from 23.1.0 to 23.1.1. ([\#15373](https://github.com/matrix-org/synapse/issues/15373)) +- Bump types-psycopg2 from 2.9.21.8 to 2.9.21.9. ([\#15374](https://github.com/matrix-org/synapse/issues/15374)) +- Bump types-netaddr from 0.8.0.6 to 0.8.0.7. ([\#15375](https://github.com/matrix-org/synapse/issues/15375)) +- Bump types-opentracing from 2.4.10.3 to 2.4.10.4. ([\#15376](https://github.com/matrix-org/synapse/issues/15376)) +- Bump dawidd6/action-download-artifact from 2.26.0 to 2.26.1. ([\#15404](https://github.com/matrix-org/synapse/issues/15404)) +- Bump parameterized from 0.8.1 to 0.9.0. ([\#15412](https://github.com/matrix-org/synapse/issues/15412)) +- Bump types-pillow from 9.4.0.17 to 9.4.0.19. ([\#15413](https://github.com/matrix-org/synapse/issues/15413)) +- Bump sentry-sdk from 1.17.0 to 1.19.1. ([\#15414](https://github.com/matrix-org/synapse/issues/15414)) +- Bump immutabledict from 2.2.3 to 2.2.4. ([\#15415](https://github.com/matrix-org/synapse/issues/15415)) +- Bump dawidd6/action-download-artifact from 2.26.1 to 2.27.0. ([\#15441](https://github.com/matrix-org/synapse/issues/15441)) +- Bump serde_json from 1.0.95 to 1.0.96. ([\#15442](https://github.com/matrix-org/synapse/issues/15442)) +- Bump serde from 1.0.159 to 1.0.160. ([\#15443](https://github.com/matrix-org/synapse/issues/15443)) +- Bump pillow from 9.4.0 to 9.5.0. ([\#15444](https://github.com/matrix-org/synapse/issues/15444)) +- Bump furo from 2023.3.23 to 2023.3.27. ([\#15445](https://github.com/matrix-org/synapse/issues/15445)) +- Bump types-pyopenssl from 23.1.0.0 to 23.1.0.2. ([\#15446](https://github.com/matrix-org/synapse/issues/15446)) +- Bump mypy from 1.0.0 to 1.0.1. ([\#15447](https://github.com/matrix-org/synapse/issues/15447)) +- Bump psycopg2 from 2.9.5 to 2.9.6. ([\#15448](https://github.com/matrix-org/synapse/issues/15448)) +- Improve DB performance of clearing out old data from `stream_ordering_to_exterm`. ([\#15382](https://github.com/matrix-org/synapse/issues/15382), [\#15429](https://github.com/matrix-org/synapse/issues/15429)) +- Implement [MSC3989](https://github.com/matrix-org/matrix-spec-proposals/pull/3989) redaction algorithm. ([\#15393](https://github.com/matrix-org/synapse/issues/15393)) +- Implement [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) to stop adding `creator` to create events. 
([\#15394](https://github.com/matrix-org/synapse/issues/15394))
+- Implement [MSC2174](https://github.com/matrix-org/matrix-spec-proposals/pull/2174) to move the `redacts` key to a `content` property. ([\#15395](https://github.com/matrix-org/synapse/issues/15395))
+- Trust dtolnay/rust-toolchain in CI. ([\#15406](https://github.com/matrix-org/synapse/issues/15406))
+- Explicitly install Synapse during typechecking in CI. ([\#15409](https://github.com/matrix-org/synapse/issues/15409))
+- Only load the SSO redirect servlet if SSO is enabled. ([\#15421](https://github.com/matrix-org/synapse/issues/15421))
+- Refactor `SimpleHttpClient` to pull out a base class. ([\#15427](https://github.com/matrix-org/synapse/issues/15427))
+- Improve type hints. ([\#15432](https://github.com/matrix-org/synapse/issues/15432))
+- Convert async to normal tests in `TestSSOHandler`. ([\#15433](https://github.com/matrix-org/synapse/issues/15433))
+- Speed up the user directory background update. ([\#15435](https://github.com/matrix-org/synapse/issues/15435))
+- Disable directory listing for static resources in `/_matrix/static/`. ([\#15438](https://github.com/matrix-org/synapse/issues/15438))
+- Move various module API callback registration methods to a dedicated class. ([\#15453](https://github.com/matrix-org/synapse/issues/15453))
+
+
+Synapse 1.81.0 (2023-04-11)
+===========================
+
+Synapse now attempts the versioned appservice paths before falling back to the
+[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes).
+Usage of the legacy routes should be considered deprecated.
+
+Additionally, Synapse has supported sending the application service access token
+via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization)
+since v1.70.0. For backwards compatibility it is *also* sent as the `access_token`
+query parameter. This is insecure and should be considered deprecated.
+
+A future version of Synapse (v1.88.0 or later) will remove support for legacy
+application service routes and query parameter authorization.
+
+
+No significant changes since 1.81.0rc2.
+
+
+Synapse 1.81.0rc2 (2023-04-06)
+==============================
+
+Bugfixes
+--------
+
+- Fix the `set_device_id_for_pushers_txn` background update crash. ([\#15391](https://github.com/matrix-org/synapse/issues/15391))
+
+
+Internal Changes
+----------------
+
+- Update CI to run complement under the latest stable Go version. ([\#15403](https://github.com/matrix-org/synapse/issues/15403))
+
+
+Synapse 1.81.0rc1 (2023-04-04)
+==============================
+
+Features
+--------
+
+- Add the ability to enable/disable registrations when in the OIDC flow. ([\#14978](https://github.com/matrix-org/synapse/issues/14978))
+- Add a primitive helper script for listing worker endpoints. ([\#15243](https://github.com/matrix-org/synapse/issues/15243))
+- Experimental support for passing One Time Key and device key requests to application services ([MSC3983](https://github.com/matrix-org/matrix-spec-proposals/pull/3983) and [MSC3984](https://github.com/matrix-org/matrix-spec-proposals/pull/3984)). ([\#15314](https://github.com/matrix-org/synapse/issues/15314), [\#15321](https://github.com/matrix-org/synapse/issues/15321))
+- Allow loading `/password_policy` endpoint on workers. ([\#15331](https://github.com/matrix-org/synapse/issues/15331))
+- Add experimental support for Unix sockets. Contributed by Jason Little.
([\#15353](https://github.com/matrix-org/synapse/issues/15353))
+- Build Debian packages for Ubuntu 23.04 (Lunar Lobster). ([\#15381](https://github.com/matrix-org/synapse/issues/15381))
+
+
+Bugfixes
+--------
+
+- Fix a long-standing bug where edits of non-`m.room.message` events would not be correctly bundled. ([\#15295](https://github.com/matrix-org/synapse/issues/15295))
+- Fix a bug introduced in Synapse v1.55.0 which could delay remote homeservers being able to decrypt encrypted messages sent by local users. ([\#15297](https://github.com/matrix-org/synapse/issues/15297))
+- Add a check to [SQLite port_db script](https://matrix-org.github.io/synapse/latest/postgres.html#porting-from-sqlite)
+  to ensure that the sqlite database passed to the script exists before trying to port from it. ([\#15306](https://github.com/matrix-org/synapse/issues/15306))
+- Fix a bug introduced in Synapse 1.76.0 where responses from worker deployments could include an internal `_INT_STREAM_POS` key. ([\#15309](https://github.com/matrix-org/synapse/issues/15309))
+- Fix a long-standing bug that Synapse only used the [legacy appservice routes](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). ([\#15317](https://github.com/matrix-org/synapse/issues/15317))
+- Fix a long-standing bug preventing users from rejoining rooms after being banned and unbanned over federation. Contributed by Nico. ([\#15323](https://github.com/matrix-org/synapse/issues/15323))
+- Fix bug in worker mode where on a rolling restart of workers the "typing" worker would consume 100% CPU until it got restarted. ([\#15332](https://github.com/matrix-org/synapse/issues/15332))
+- Fix a long-standing bug where some to_device messages could be dropped when using workers. ([\#15349](https://github.com/matrix-org/synapse/issues/15349))
+- Fix a bug introduced in Synapse 1.70.0 where the background sync from a faster join could spin for hours when one of the events involved had been marked for backoff. ([\#15351](https://github.com/matrix-org/synapse/issues/15351))
+- Fix missing app variable in mail subject for password resets. Contributed by Cyberes. ([\#15352](https://github.com/matrix-org/synapse/issues/15352))
+- Fix a rare bug introduced in Synapse 1.66.0 where initial syncs would fail when the user had been kicked from a faster joined room that had not finished syncing. ([\#15383](https://github.com/matrix-org/synapse/issues/15383))
+
+
+Improved Documentation
+----------------------
+
+- Fix a typo in login requests ratelimit defaults. ([\#15341](https://github.com/matrix-org/synapse/issues/15341))
+- Add some clarification to the doc/comments regarding TCP replication. ([\#15354](https://github.com/matrix-org/synapse/issues/15354))
+- Note that Synapse 1.74 queued a rebuild of the user directory tables. ([\#15386](https://github.com/matrix-org/synapse/issues/15386))
+
+
+Internal Changes
+----------------
+
+- Use `immutabledict` instead of `frozendict`. ([\#15113](https://github.com/matrix-org/synapse/issues/15113))
+- Add developer documentation for the Federation Sender and add a documentation mechanism using Sphinx. ([\#15265](https://github.com/matrix-org/synapse/issues/15265), [\#15336](https://github.com/matrix-org/synapse/issues/15336))
+- Make the pushers rely on the `device_id` instead of the `access_token_id` for various operations. ([\#15280](https://github.com/matrix-org/synapse/issues/15280))
+- Bump sentry-sdk from 1.15.0 to 1.17.0.
([\#15285](https://github.com/matrix-org/synapse/issues/15285)) +- Allow running the Twisted trunk job against other branches. ([\#15302](https://github.com/matrix-org/synapse/issues/15302)) +- Remind the releaser to ask for changelog feedback in [#synapse-dev](https://matrix.to/#/#synapse-dev:matrix.org). ([\#15303](https://github.com/matrix-org/synapse/issues/15303)) +- Bump dtolnay/rust-toolchain from e12eda571dc9a5ee5d58eecf4738ec291c66f295 to fc3253060d0c959bea12a59f10f8391454a0b02d. ([\#15304](https://github.com/matrix-org/synapse/issues/15304)) +- Reject events with an invalid "mentions" property per [MSC3952](https://github.com/matrix-org/matrix-spec-proposals/pull/3952). ([\#15311](https://github.com/matrix-org/synapse/issues/15311)) +- As an optimisation, use `TRUNCATE` on Postgres when clearing the user directory tables. ([\#15316](https://github.com/matrix-org/synapse/issues/15316)) +- Fix `.gitignore` rule for the Complement source tarball downloaded automatically by `complement.sh`. ([\#15319](https://github.com/matrix-org/synapse/issues/15319)) +- Bump serde from 1.0.157 to 1.0.158. ([\#15324](https://github.com/matrix-org/synapse/issues/15324)) +- Bump regex from 1.7.1 to 1.7.3. ([\#15325](https://github.com/matrix-org/synapse/issues/15325)) +- Bump types-pyopenssl from 23.0.0.4 to 23.1.0.0. ([\#15326](https://github.com/matrix-org/synapse/issues/15326)) +- Bump furo from 2022.12.7 to 2023.3.23. ([\#15327](https://github.com/matrix-org/synapse/issues/15327)) +- Bump ruff from 0.0.252 to 0.0.259. ([\#15328](https://github.com/matrix-org/synapse/issues/15328)) +- Bump cryptography from 40.0.0 to 40.0.1. ([\#15329](https://github.com/matrix-org/synapse/issues/15329)) +- Bump mypy-zope from 0.9.0 to 0.9.1. ([\#15330](https://github.com/matrix-org/synapse/issues/15330)) +- Speed up unit tests when using SQLite3. ([\#15334](https://github.com/matrix-org/synapse/issues/15334)) +- Speed up pydantic CI job. ([\#15339](https://github.com/matrix-org/synapse/issues/15339)) +- Speed up sample config CI job. ([\#15340](https://github.com/matrix-org/synapse/issues/15340)) +- Fix copyright year in SSO footer template. ([\#15358](https://github.com/matrix-org/synapse/issues/15358)) +- Bump peaceiris/actions-gh-pages from 3.9.2 to 3.9.3. ([\#15369](https://github.com/matrix-org/synapse/issues/15369)) +- Bump serde from 1.0.158 to 1.0.159. ([\#15370](https://github.com/matrix-org/synapse/issues/15370)) +- Bump serde_json from 1.0.94 to 1.0.95. ([\#15371](https://github.com/matrix-org/synapse/issues/15371)) +- Speed up membership queries for users with forgotten rooms. ([\#15385](https://github.com/matrix-org/synapse/issues/15385)) + + Synapse 1.80.0 (2023-03-28) =========================== @@ -394,7 +572,7 @@ Those who are `poetry install`ing from source using our lockfile should ensure t Notes on faster joins --------------------- -The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms. +The faster joins project sees the most benefit when joining a room with a large number of members (joined or historical). We expect it to be particularly useful for joining large public rooms like the [Matrix HQ](https://matrix.to/#/#matrix:matrix.org) or [Synapse Admins](https://matrix.to/#/#synapse:matrix.org) rooms. 
After a faster join, Synapse considers that room "partially joined". In this state, you should be able to
diff --git a/Cargo.lock b/Cargo.lock
index 2bad27154391..f661eb532cdc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -294,9 +294,9 @@ dependencies = [
 
 [[package]]
 name = "regex"
-version = "1.7.1"
+version = "1.7.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "48aaa5748ba571fb95cd2c85c09f629215d3a6ece942baa100950af03a34f733"
+checksum = "8b1f693b24f6ac912f4893ef08244d70b6067480d2f1a46e950c9691e6749d1d"
 dependencies = [
  "aho-corasick",
  "memchr",
@@ -305,9 +305,9 @@ dependencies = [
 
 [[package]]
 name = "regex-syntax"
-version = "0.6.27"
+version = "0.6.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"
+checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
 
 [[package]]
 name = "ryu"
@@ -323,29 +323,29 @@
 checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
 
 [[package]]
 name = "serde"
-version = "1.0.157"
+version = "1.0.160"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "707de5fcf5df2b5788fca98dd7eab490bc2fd9b7ef1404defc462833b83f25ca"
+checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c"
 dependencies = [
  "serde_derive",
 ]
 
 [[package]]
 name = "serde_derive"
-version = "1.0.157"
+version = "1.0.160"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78997f4555c22a7971214540c4a661291970619afd56de19f77e0de86296e1e5"
+checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.2",
+ "syn 2.0.10",
 ]
 
 [[package]]
 name = "serde_json"
-version = "1.0.94"
+version = "1.0.96"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c533a59c9d8a93a09c6ab31f0fd5e5f4dd1b8fc9434804029839884765d04ea"
+checksum = "057d394a50403bcac12672b2b18fb387ab6d289d957dab67dd201875391e52f1"
 dependencies = [
  "itoa",
  "ryu",
@@ -377,9 +377,9 @@ dependencies = [
 
 [[package]]
 name = "syn"
-version = "2.0.2"
+version = "2.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "59d3276aee1fa0c33612917969b5172b5be2db051232a6e4826f1a1a9191b045"
+checksum = "5aad1363ed6d37b84299588d62d3a7d95b5a5c2d9aad5c85609fda12afaa1f40"
 dependencies = [
  "proc-macro2",
  "quote",
diff --git a/debian/changelog b/debian/changelog
index 98366d49166b..f6e8720e5894 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,33 @@
+matrix-synapse-py3 (1.82.0) stable; urgency=medium
+
+  * New Synapse release 1.82.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 25 Apr 2023 11:56:06 +0100
+
+matrix-synapse-py3 (1.82.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.82.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 18 Apr 2023 09:47:30 +0100
+
+matrix-synapse-py3 (1.81.0) stable; urgency=medium
+
+  * New Synapse release 1.81.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 11 Apr 2023 14:18:35 +0100
+
+matrix-synapse-py3 (1.81.0~rc2) stable; urgency=medium
+
+  * New Synapse release 1.81.0rc2.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 06 Apr 2023 16:07:54 +0100
+
+matrix-synapse-py3 (1.81.0~rc1) stable; urgency=medium
+
+  * New Synapse release 1.81.0rc1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 04 Apr 2023 14:29:03 +0100
+
 matrix-synapse-py3 (1.80.0) stable; urgency=medium
 
   * New Synapse release 1.80.0.
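
The files that follow add a Sphinx-based developer-documentation tree under `dev-docs/`. As a rough sketch of how those docs would be built locally — assuming the optional `dev-docs` poetry group declared later in this diff has been installed (e.g. via `poetry install --with dev-docs`) — the Makefile's `html` target boils down to a `sphinx-build` make-mode invocation, which can equally be driven from Python:

```python
# A minimal sketch of what `make -C dev-docs html` ends up running.
# `build_main` is Sphinx's entry point for the `sphinx-build` CLI; the
# "-M html" make-mode arguments mirror the catch-all target in the new
# dev-docs/Makefile. Paths assume we are at the repository root.
from sphinx.cmd.build import build_main

exit_code = build_main(["-M", "html", "dev-docs", "dev-docs/_build"])
raise SystemExit(exit_code)
```
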
diff --git a/dev-docs/Makefile b/dev-docs/Makefile new file mode 100644 index 000000000000..d4bb2cbb9edd --- /dev/null +++ b/dev-docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/dev-docs/conf.py b/dev-docs/conf.py new file mode 100644 index 000000000000..826d578c0bec --- /dev/null +++ b/dev-docs/conf.py @@ -0,0 +1,50 @@ +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = "Synapse development" +copyright = "2023, The Matrix.org Foundation C.I.C." +author = "The Synapse Maintainers and Community" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "autodoc2", + "myst_parser", +] + +templates_path = ["_templates"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + + +# -- Options for Autodoc2 ---------------------------------------------------- + +autodoc2_docstring_parser_regexes = [ + # this will render all docstrings as 'MyST' Markdown + (r".*", "myst"), +] + +autodoc2_packages = [ + { + "path": "../synapse", + # Don't render documentation for everything as a matter of course + "auto_mode": False, + }, +] + + +# -- Options for MyST (Markdown) --------------------------------------------- + +# myst_heading_anchors = 2 + + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = "furo" +html_static_path = ["_static"] diff --git a/dev-docs/index.rst b/dev-docs/index.rst new file mode 100644 index 000000000000..1ef210460abc --- /dev/null +++ b/dev-docs/index.rst @@ -0,0 +1,22 @@ +.. Synapse Developer Documentation documentation master file, created by + sphinx-quickstart on Mon Mar 13 08:59:51 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to the Synapse Developer Documentation! +=========================================================== + +.. 
toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   modules/federation_sender
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/dev-docs/modules/federation_sender.md b/dev-docs/modules/federation_sender.md
new file mode 100644
index 000000000000..dac6852c1664
--- /dev/null
+++ b/dev-docs/modules/federation_sender.md
@@ -0,0 +1,5 @@
+Federation Sender
+=================
+
+```{autodoc2-docstring} synapse.federation.sender
+```
diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py
index 3f2f5c2daf85..4beec3daaf8b 100755
--- a/docker/configure_workers_and_start.py
+++ b/docker/configure_workers_and_start.py
@@ -172,6 +172,9 @@
         "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
         "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
         "^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)",
+        "^/_matrix/client/(r0|v3|unstable)/password_policy$",
+        "^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$",
+        "^/_matrix/client/(r0|v3|unstable)/capabilities$",
     ],
     "shared_extra_conf": {},
     "worker_extra_conf": "",
diff --git a/docs/modules/password_auth_provider_callbacks.md b/docs/modules/password_auth_provider_callbacks.md
index f6349d5404f4..8275f7ebdc67 100644
--- a/docs/modules/password_auth_provider_callbacks.md
+++ b/docs/modules/password_auth_provider_callbacks.md
@@ -103,6 +103,9 @@ Called during a logout request for a user. It is passed the qualified user ID, the
 deactivated device (if any: access tokens are occasionally created without an associated
 device ID), and the (now deactivated) access token.
 
+Deleting the related pushers is done after calling `on_logged_out`, so you can rely on them
+to still be present.
+
 If multiple modules implement this callback, Synapse runs them all in order.
 
 ### `get_username_for_registration`
diff --git a/docs/tcp_replication.md b/docs/tcp_replication.md
index 15df949debc8..083cda8413d3 100644
--- a/docs/tcp_replication.md
+++ b/docs/tcp_replication.md
@@ -25,7 +25,7 @@ position of all streams. The server then periodically sends `RDATA` commands
 which have the format `RDATA <stream_name> <instance_name> <token> <row>`, where
 the format of `<row>` is defined by the individual streams. The
 `<instance_name>` is the name of the Synapse process that generated the data
-(usually "master").
+(usually "master"). We expect an RDATA for every row in the DB.
 
 Error reporting happens by either the client or server sending an ERROR
 command, and usually the connection will be closed.
@@ -107,7 +107,7 @@ reconnect, following the steps above.
 If the server sends messages faster than the client can consume them the
 server will first buffer a (fairly large) number of commands and then
 disconnect the client. This ensures that we don't queue up an unbounded
-number of commands in memory and gives us a potential oppurtunity to
+number of commands in memory and gives us a potential opportunity to
 squawk loudly. When/if the client recovers it can reconnect to the
 server and ask for missed messages.
@@ -122,7 +122,7 @@ since these include tokens which can be used to restart the stream on
 connection errors.
 
 The client should keep track of the token in the last RDATA command
-received for each stream so that on reconneciton it can start streaming
+received for each stream so that on reconnection it can start streaming
 from the correct place. Note: not all RDATA have valid tokens due to
 batching. See `RdataCommand` for more details.
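
The token bookkeeping described above is the crux of reliable replication: a client that remembers the last token it saw on each stream can always resume without losing rows. A minimal, hypothetical sketch of that bookkeeping follows (Synapse's real client lives under `synapse/replication/tcp/` and is considerably more involved; the class and method names here are made up for illustration):

```python
from typing import Dict, Optional


class StreamPositionTracker:
    """Tracks the last token seen on each replication stream so that,
    after a disconnect, we can resume from the right position."""

    def __init__(self) -> None:
        self._last_token: Dict[str, int] = {}

    def on_rdata(self, stream_name: str, token: Optional[int]) -> None:
        # Not every RDATA carries a valid token: rows for the same token
        # may be batched over several commands, with only the final one
        # carrying the token.
        if token is not None:
            self._last_token[stream_name] = token

    def resume_from(self, stream_name: str, default: int = 0) -> int:
        # The token to hand back to the server when re-subscribing.
        return self._last_token.get(stream_name, default)
```
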
@@ -188,7 +188,8 @@ client (C): Two positions are included, the "new" position and the last position sent respectively. This allows servers to tell instances that the positions have advanced but no data has been written, without clients needlessly checking to see if they - have missed any updates. + have missed any updates. Instances will only fetch stuff if there is a gap between + their current position and the given last position. #### ERROR (S, C) diff --git a/docs/upgrade.md b/docs/upgrade.md index f14444a400ea..0886b0311571 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -88,6 +88,22 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.81.0 + +## Application service path & authentication deprecations + +Synapse now attempts the versioned appservice paths before falling back to the +[legacy paths](https://spec.matrix.org/v1.6/application-service-api/#legacy-routes). +Usage of the legacy routes should be considered deprecated. + +Additionally, Synapse has supported sending the application service access token +via [the `Authorization` header](https://spec.matrix.org/v1.6/application-service-api/#authorization) +since v1.70.0. For backwards compatibility it is *also* sent as the `access_token` +query parameter. This is insecure and should be considered deprecated. + +A future version of Synapse (v1.88.0 or later) will remove support for legacy +application service routes and query parameter authorization. + # Upgrading to v1.80.0 ## Reporting events error code change @@ -183,6 +199,17 @@ Docker images and Debian packages need nothing specific as they already include or specify ICU as an explicit dependency. +## User directory rebuild + +Synapse 1.74 queues a background update +[to rebuild the user directory](https://github.com/matrix-org/synapse/pull/14643), +in order to fix missing or erroneous entries. + +When this update begins, the user directory will be cleared out and rebuilt from +scratch. User directory lookups will be incomplete until the rebuild completes. +Admins can monitor the rebuild's progress by using the +[Background update Admin API](usage/administration/admin_api/background_updates.md#status). + # Upgrading to v1.73.0 ## Legacy Prometheus metric names have now been removed diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 015855ee7ef4..1b6f2569490e 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -577,6 +577,10 @@ delete any device that hasn't been accessed for more than the specified amount o Defaults to no duration, which means devices are never pruned. +**Note:** This task will always run on the main process, regardless of the value of +`run_background_tasks_on`. This is due to workers currently not having the ability to +delete devices. + Example configuration: ```yaml delete_stale_devices_after: 1y @@ -1521,7 +1525,7 @@ This option specifies several limits for login: address. Defaults to `per_second: 0.003`, `burst_count: 5`. * `account` ratelimits login requests based on the account the - client is attempting to log into. Defaults to `per_second: 0.03`, + client is attempting to log into. Defaults to `per_second: 0.003`, `burst_count: 5`. * `failed_attempts` ratelimits login requests based on the account the @@ -3100,6 +3104,11 @@ Options for each entry include: match a pre-existing account instead of failing. This could be used if switching from password logins to OIDC. 
Defaults to false.
 
+* `enable_registration`: set to 'false' to disable automatic registration of new
+   users. This allows the OIDC SSO flow to be limited to sign in only, rather than
+   automatically registering users that have a valid SSO login but do not have
+   a pre-registered account. Defaults to true.
+
 * `user_mapping_provider`: Configuration for how attributes returned from a OIDC
   provider are mapped onto a matrix user. This setting has the following
   sub-properties:
@@ -3216,6 +3225,7 @@ oidc_providers:
     userinfo_endpoint: "https://accounts.example.com/userinfo"
     jwks_uri: "https://accounts.example.com/.well-known/jwks.json"
     skip_verification: true
+    enable_registration: true
     user_mapping_provider:
       config:
         subject_claim: "id"
diff --git a/docs/workers.md b/docs/workers.md
index bf7690f5aff2..6192a46e0950 100644
--- a/docs/workers.md
+++ b/docs/workers.md
@@ -234,6 +234,8 @@ information.
     ^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases
     ^/_matrix/client/(api/v1|r0|v3|unstable)/search$
     ^/_matrix/client/(r0|v3|unstable)/user/.*/filter(/|$)
+    ^/_matrix/client/(api/v1|r0|v3|unstable)/directory/room/.*$
+    ^/_matrix/client/(r0|v3|unstable)/capabilities$
 
     # Encryption requests
     ^/_matrix/client/(r0|v3|unstable)/keys/query$
@@ -247,6 +249,7 @@ information.
     ^/_matrix/client/(r0|v3|unstable)/register$
     ^/_matrix/client/(r0|v3|unstable)/register/available$
     ^/_matrix/client/v1/register/m.login.registration_token/validity$
+    ^/_matrix/client/(r0|v3|unstable)/password_policy$
 
     # Event sending requests
     ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
diff --git a/pyproject.toml b/pyproject.toml
index 9a68159f6aaa..dfc9850a8de4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -89,7 +89,7 @@ manifest-path = "rust/Cargo.toml"
 
 [tool.poetry]
 name = "matrix-synapse"
-version = "1.80.0"
+version = "1.82.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "Apache-2.0"
@@ -153,15 +153,13 @@ python = "^3.7.1"
 # ----------------------
 # we use the TYPE_CHECKER.redefine method added in jsonschema 3.0.0
 jsonschema = ">=3.0.0"
-# frozendict 2.1.2 is broken on Debian 10: https://github.com/Marco-Sulla/python-frozendict/issues/41
-# We cannot test our wheels against the 2.3.5 release in CI. Putting in an upper bound for this
-# because frozendict has been more trouble than it's worth; we would like to move to immutabledict.
-frozendict = ">=1,!=2.1.2,<2.3.5"
+# We choose 2.0 as a lower bound: the most recent backwards incompatible release.
+# It seems generally available, judging by https://pkgs.org/search/?q=immutabledict
+immutabledict = ">=2.0"
 # We require 2.1.0 or higher for type hints. Previous guard was >= 1.1.0
 unpaddedbase64 = ">=2.1.0"
-# We require 1.5.0 to work around an issue when running against the C implementation of
-# frozendict: https://github.com/matrix-org/python-canonicaljson/issues/36
-canonicaljson = "^1.5.0"
+# We require 2.0.0 for immutabledict support.
+canonicaljson = "^2.0.0"
 # we use the type definitions added in signedjson 1.1.
 signedjson = "^1.1.0"
 # validating SSL certs for IP addresses requires service_identity 18.1.
@@ -314,7 +312,7 @@ all = [
 # We pin black so that our tests don't start failing on new releases.
isort = ">=5.10.1" black = ">=22.3.0" -ruff = "0.0.252" +ruff = "0.0.259" # Typechecking mypy = "*" @@ -353,6 +351,18 @@ towncrier = ">=18.6.0rc1" # Used for checking the Poetry lockfile tomli = ">=1.2.3" + +# Dependencies for building the development documentation +[tool.poetry.group.dev-docs] +optional = true + +[tool.poetry.group.dev-docs.dependencies] +sphinx = {version = "^6.1", python = "^3.8"} +sphinx-autodoc2 = {version = "^0.4.2", python = "^3.8"} +myst-parser = {version = "^1.0.0", python = "^3.8"} +furo = ">=2022.12.7,<2024.0.0" + + [build-system] # The upper bounds here are defensive, intended to prevent situations like # #13849 and #14079 where we see buildtime or runtime errors caused by build diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 744230019610..ede766501100 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -28,6 +28,7 @@ "ubuntu:focal", # 20.04 LTS (our EOL forced by Py38 on 2024-10-14) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) "ubuntu:kinetic", # 22.10 (EOL 2023-07-20) + "ubuntu:lunar", # 23.04 (EOL 2024-01) ) DESC = """\ diff --git a/scripts-dev/lint.sh b/scripts-dev/lint.sh index 9e4ed3246e7a..1c0e6582f684 100755 --- a/scripts-dev/lint.sh +++ b/scripts-dev/lint.sh @@ -91,6 +91,7 @@ else "synapse" "docker" "tests" "scripts-dev" "contrib" "synmark" "stubs" ".ci" + "dev-docs" ) fi fi diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 008a5bd965d4..ec92a59bb8be 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -280,7 +280,7 @@ def _prepare() -> None: ) print("Opening the changelog in your browser...") - print("Please ask others to give it a check.") + print("Please ask #synapse-dev to give it a check.") click.launch( f"https://github.com/matrix-org/synapse/blob/{synapse_repo.active_branch.name}/CHANGES.md" ) diff --git a/stubs/frozendict.pyi b/stubs/frozendict.pyi deleted file mode 100644 index 196dee4461ea..000000000000 --- a/stubs/frozendict.pyi +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Stub for frozendict. - -from __future__ import annotations - -from typing import Any, Hashable, Iterable, Iterator, Mapping, Tuple, TypeVar, overload - -_KT = TypeVar("_KT", bound=Hashable) # Key type. -_VT = TypeVar("_VT") # Value type. - -class frozendict(Mapping[_KT, _VT]): - @overload - def __init__(self, **kwargs: _VT) -> None: ... - @overload - def __init__(self, __map: Mapping[_KT, _VT], **kwargs: _VT) -> None: ... - @overload - def __init__( - self, __iterable: Iterable[Tuple[_KT, _VT]], **kwargs: _VT - ) -> None: ... - def __getitem__(self, key: _KT) -> _VT: ... - def __contains__(self, key: Any) -> bool: ... - def copy(self, **add_or_replace: Any) -> frozendict: ... - def __iter__(self) -> Iterator[_KT]: ... - def __len__(self) -> int: ... - def __repr__(self) -> str: ... - def __hash__(self) -> int: ... 
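
With frozendict gone (pyproject now depends on immutabledict, and canonicaljson is bumped to 2.0 for its preserialisation hooks), canonicaljson has to be taught how to encode the new type; the `synapse/__init__.py` change below does exactly that. A standalone sketch of the same mechanism, assuming canonicaljson >= 2.0 and immutabledict are installed:

```python
from canonicaljson import encode_canonical_json, register_preserialisation_callback
from immutabledict import immutabledict

# canonicaljson only knows how to serialise plain mappings, so register a
# converter that it calls for every immutabledict it encounters while
# encoding. Plain `dict` works as the callback because immutabledict is a
# Mapping (Synapse's version below prefers the internal `_dict` for speed).
register_preserialisation_callback(immutabledict, dict)

frozen = immutabledict({"b": 2, "a": 1})
# Canonical JSON sorts keys and strips whitespace:
print(encode_canonical_json({"content": frozen}))  # b'{"content":{"a":1,"b":2}}'
```
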
diff --git a/synapse/__init__.py b/synapse/__init__.py index a203ed533ae5..b97ee59f15b6 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -17,9 +17,9 @@ """ This is an implementation of a Matrix homeserver. """ -import json import os import sys +from typing import Any, Dict from synapse.util.rust import check_rust_lib_up_to_date from synapse.util.stringutils import strtobool @@ -61,11 +61,20 @@ except ImportError: pass -# Use the standard library json implementation instead of simplejson. +# Teach canonicaljson how to serialise immutabledicts. try: - from canonicaljson import set_json_library - - set_json_library(json) + from canonicaljson import register_preserialisation_callback + from immutabledict import immutabledict + + def _immutabledict_cb(d: immutabledict) -> Dict[str, Any]: + try: + return d._dict + except Exception: + # Paranoia: fall back to a `dict()` call, in case a future version of + # immutabledict removes `_dict` from the implementation. + return dict(d) + + register_preserialisation_callback(immutabledict, _immutabledict_cb) except ImportError: pass diff --git a/synapse/_scripts/generate_workers_map.py b/synapse/_scripts/generate_workers_map.py new file mode 100755 index 000000000000..6c0887852368 --- /dev/null +++ b/synapse/_scripts/generate_workers_map.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python +# Copyright 2022-2023 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import logging +import re +from collections import defaultdict +from dataclasses import dataclass +from typing import Dict, Iterable, Optional, Pattern, Set, Tuple + +import yaml + +from synapse.config.homeserver import HomeServerConfig +from synapse.federation.transport.server import ( + TransportLayerServer, + register_servlets as register_federation_servlets, +) +from synapse.http.server import HttpServer, ServletCallback +from synapse.rest import ClientRestResource +from synapse.rest.key.v2 import RemoteKey +from synapse.server import HomeServer +from synapse.storage import DataStore + +logger = logging.getLogger("generate_workers_map") + + +class MockHomeserver(HomeServer): + DATASTORE_CLASS = DataStore # type: ignore + + def __init__(self, config: HomeServerConfig, worker_app: Optional[str]) -> None: + super().__init__(config.server.server_name, config=config) + self.config.worker.worker_app = worker_app + + +GROUP_PATTERN = re.compile(r"\(\?P<[^>]+?>(.+?)\)") + + +@dataclass +class EndpointDescription: + """ + Describes an endpoint and how it should be routed. + """ + + # The servlet class that handles this endpoint + servlet_class: object + + # The category of this endpoint. Is read from the `CATEGORY` constant in the servlet + # class. + category: Optional[str] + + # TODO: + # - does it need to be routed based on a stream writer config? + # - does it benefit from any optimised, but optional, routing? + # - what 'opinionated synapse worker class' (event_creator, synchrotron, etc) does + # it go in? 
+ + +class EnumerationResource(HttpServer): + """ + Accepts servlet registrations for the purposes of building up a description of + all endpoints. + """ + + def __init__(self, is_worker: bool) -> None: + self.registrations: Dict[Tuple[str, str], EndpointDescription] = {} + self._is_worker = is_worker + + def register_paths( + self, + method: str, + path_patterns: Iterable[Pattern], + callback: ServletCallback, + servlet_classname: str, + ) -> None: + # federation servlet callbacks are wrapped, so unwrap them. + callback = getattr(callback, "__wrapped__", callback) + + # fish out the servlet class + servlet_class = callback.__self__.__class__ # type: ignore + + if self._is_worker and method in getattr( + servlet_class, "WORKERS_DENIED_METHODS", () + ): + # This endpoint would cause an error if called on a worker, so pretend it + # was never registered! + return + + sd = EndpointDescription( + servlet_class=servlet_class, + category=getattr(servlet_class, "CATEGORY", None), + ) + + for pat in path_patterns: + self.registrations[(method, pat.pattern)] = sd + + +def get_registered_paths_for_hs( + hs: HomeServer, +) -> Dict[Tuple[str, str], EndpointDescription]: + """ + Given a homeserver, get all registered endpoints and their descriptions. + """ + + enumerator = EnumerationResource(is_worker=hs.config.worker.worker_app is not None) + ClientRestResource.register_servlets(enumerator, hs) + federation_server = TransportLayerServer(hs) + + # we can't use `federation_server.register_servlets` but this line does the + # same thing, only it uses this enumerator + register_federation_servlets( + federation_server.hs, + resource=enumerator, + ratelimiter=federation_server.ratelimiter, + authenticator=federation_server.authenticator, + servlet_groups=federation_server.servlet_groups, + ) + + # the key server endpoints are separate again + RemoteKey(hs).register(enumerator) + + return enumerator.registrations + + +def get_registered_paths_for_default( + worker_app: Optional[str], base_config: HomeServerConfig +) -> Dict[Tuple[str, str], EndpointDescription]: + """ + Given the name of a worker application and a base homeserver configuration, + returns: + + Dict from (method, path) to EndpointDescription + + TODO Don't require passing in a config + """ + + hs = MockHomeserver(base_config, worker_app) + # TODO We only do this to avoid an error, but don't need the database etc + hs.setup() + return get_registered_paths_for_hs(hs) + + +def elide_http_methods_if_unconflicting( + registrations: Dict[Tuple[str, str], EndpointDescription], + all_possible_registrations: Dict[Tuple[str, str], EndpointDescription], +) -> Dict[Tuple[str, str], EndpointDescription]: + """ + Elides HTTP methods (by replacing them with `*`) if all possible registered methods + can be handled by the worker whose registration map is `registrations`. + + i.e. the only endpoints left with methods (other than `*`) should be the ones where + the worker can't handle all possible methods for that path. + """ + + def paths_to_methods_dict( + methods_and_paths: Iterable[Tuple[str, str]] + ) -> Dict[str, Set[str]]: + """ + Given (method, path) pairs, produces a dict from path to set of methods + available at that path. 
+        """
+        result: Dict[str, Set[str]] = {}
+        for method, path in methods_and_paths:
+            result.setdefault(path, set()).add(method)
+        return result
+
+    all_possible_reg_methods = paths_to_methods_dict(all_possible_registrations)
+    reg_methods = paths_to_methods_dict(registrations)
+
+    output = {}
+
+    for path, handleable_methods in reg_methods.items():
+        if handleable_methods == all_possible_reg_methods[path]:
+            any_method = next(iter(handleable_methods))
+            # TODO This assumes that all methods have the same servlet.
+            #      I suppose that's possibly dubious?
+            output[("*", path)] = registrations[(any_method, path)]
+        else:
+            for method in handleable_methods:
+                output[(method, path)] = registrations[(method, path)]
+
+    return output
+
+
+def simplify_path_regexes(
+    registrations: Dict[Tuple[str, str], EndpointDescription]
+) -> Dict[Tuple[str, str], EndpointDescription]:
+    """
+    Simplify all the path regexes for the dict of endpoint descriptions,
+    so that we don't use the Python-specific regex extensions
+    (and also to remove needlessly specific detail).
+    """
+
+    def simplify_path_regex(path: str) -> str:
+        """
+        Given a regex pattern, replaces all named capturing groups (e.g. `(?P<x>xyz)`)
+        with a simpler version available in more common regex dialects (e.g. `.*`).
+        """
+
+        # TODO it's hard to choose between these two;
+        #      `.*` is a vague simplification
+        # return GROUP_PATTERN.sub(r"\1", path)
+        return GROUP_PATTERN.sub(r".*", path)
+
+    return {(m, simplify_path_regex(p)): v for (m, p), v in registrations.items()}
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(
+        description=(
+            "Generates a documentation-style listing of the endpoints registered"
+            " by Synapse, grouped by worker."
+        )
+    )
+    parser.add_argument("-v", action="store_true")
+    parser.add_argument(
+        "--config-path",
+        type=argparse.FileType("r"),
+        required=True,
+        help="Synapse configuration file",
+    )
+
+    args = parser.parse_args()
+
+    # TODO
+    # logging.basicConfig(**logging_config)
+
+    # Load, process and sanity-check the config.
+    hs_config = yaml.safe_load(args.config_path)
+
+    config = HomeServerConfig()
+    config.parse_config_dict(hs_config, "", "")
+
+    master_paths = get_registered_paths_for_default(None, config)
+    worker_paths = get_registered_paths_for_default(
+        "synapse.app.generic_worker", config
+    )
+
+    all_paths = {**master_paths, **worker_paths}
+
+    elided_worker_paths = elide_http_methods_if_unconflicting(worker_paths, all_paths)
+    elide_http_methods_if_unconflicting(master_paths, all_paths)
+
+    # TODO SSO endpoints (pick_idp etc) NOT REGISTERED BY THIS SCRIPT
+
+    categories_to_methods_and_paths: Dict[
+        Optional[str], Dict[Tuple[str, str], EndpointDescription]
+    ] = defaultdict(dict)
+
+    for (method, path), desc in elided_worker_paths.items():
+        categories_to_methods_and_paths[desc.category][method, path] = desc
+
+    for category, contents in categories_to_methods_and_paths.items():
+        print_category(category, contents)
+
+
+def print_category(
+    category_name: Optional[str],
+    elided_worker_paths: Dict[Tuple[str, str], EndpointDescription],
+) -> None:
+    """
+    Prints out a category, in documentation page style.
+
+    Example:
+    ```
+    # Category name
+    /path/xyz
+
+    GET /path/abc
+    ```
+    """
+
+    if category_name:
+        print(f"# {category_name}")
+    else:
+        print("# (Uncategorised requests)")
+
+    for ln in sorted(
+        p for m, p in simplify_path_regexes(elided_worker_paths) if m == "*"
+    ):
+        print(ln)
+    print()
+    for ln in sorted(
+        f"{m:6} {p}" for m, p in simplify_path_regexes(elided_worker_paths) if m != "*"
+    ):
+        print(ln)
+    print()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py
index 78d76d38ad90..a58ae2a308c6 100755
--- a/synapse/_scripts/synapse_port_db.py
+++ b/synapse/_scripts/synapse_port_db.py
@@ -18,6 +18,7 @@
 import argparse
 import curses
 import logging
+import os
 import sys
 import time
 import traceback
@@ -58,6 +59,7 @@
 from synapse.storage.databases.main.client_ips import ClientIpBackgroundUpdateStore
 from synapse.storage.databases.main.deviceinbox import DeviceInboxBackgroundUpdateStore
 from synapse.storage.databases.main.devices import DeviceBackgroundUpdateStore
+from synapse.storage.databases.main.e2e_room_keys import EndToEndRoomKeyBackgroundStore
 from synapse.storage.databases.main.end_to_end_keys import EndToEndKeyBackgroundStore
 from synapse.storage.databases.main.event_push_actions import EventPushActionsStore
 from synapse.storage.databases.main.events_bg_updates import (
@@ -67,7 +69,10 @@
     MediaRepositoryBackgroundUpdateStore,
 )
 from synapse.storage.databases.main.presence import PresenceBackgroundUpdateStore
-from synapse.storage.databases.main.pusher import PusherWorkerStore
+from synapse.storage.databases.main.pusher import (
+    PusherBackgroundUpdatesStore,
+    PusherWorkerStore,
+)
 from synapse.storage.databases.main.receipts import ReceiptsBackgroundUpdateStore
 from synapse.storage.databases.main.registration import (
     RegistrationBackgroundUpdateStore,
@@ -221,10 +226,12 @@ class Store(
     MainStateBackgroundUpdateStore,
     UserDirectoryBackgroundUpdateStore,
     EndToEndKeyBackgroundStore,
+    EndToEndRoomKeyBackgroundStore,
     StatsStore,
     AccountDataWorkerStore,
     PushRuleStore,
     PusherWorkerStore,
+    PusherBackgroundUpdatesStore,
     PresenceBackgroundUpdateStore,
     ReceiptsBackgroundUpdateStore,
     RelationsWorkerStore,
@@ -1326,6 +1333,13 @@ def main() -> None:
         filename="port-synapse.log" if args.curses else None,
     )
 
+    if not os.path.isfile(args.sqlite_database):
+        sys.stderr.write(
+            "The sqlite database you specified does not exist, please check that you have the "
+            "correct path."
+        )
+        sys.exit(1)
+
     sqlite_config = {
         "name": "sqlite3",
         "args": {
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 85811ad30cca..ff9b4ecde6df 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -216,6 +216,8 @@ class EventContentFields:
     FEDERATE: Final = "m.federate"
 
     # The creator of the room, as used in `m.room.create` events.
+    #
+    # This is deprecated in MSC2175.
     ROOM_CREATOR: Final = "creator"
 
     # Used in m.room.guest_access events.
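
The deprecation note added to `EventContentFields.ROOM_CREATOR` ties into the `msc2175_implicit_room_creator` room-version flag introduced further down: under [MSC2175](https://github.com/matrix-org/matrix-doc/pull/2175) the create event stops carrying a `creator` field, and the creator is instead implied by the event's `sender`. A hypothetical helper (not part of this diff) showing how a consumer would resolve the creator either way:

```python
from typing import Any, Mapping


def get_room_creator(create_event: Mapping[str, Any], implicit_creator: bool) -> str:
    """Resolve the creator of a room from its m.room.create event.

    `implicit_creator` corresponds to a room version's
    `msc2175_implicit_room_creator` flag.
    """
    if implicit_creator:
        # MSC2175: the creator is simply whoever sent the create event.
        return create_event["sender"]
    # Older room versions duplicate it in content["creator"].
    return create_event["content"]["creator"]
```
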
diff --git a/synapse/api/errors.py b/synapse/api/errors.py index 8c6822f3c6ea..f2d6f9ab2d9e 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -27,7 +27,7 @@ if typing.TYPE_CHECKING: from synapse.config.homeserver import HomeServerConfig - from synapse.types import JsonDict + from synapse.types import JsonDict, StrCollection logger = logging.getLogger(__name__) @@ -682,18 +682,27 @@ class FederationPullAttemptBackoffError(RuntimeError): Attributes: event_id: The event_id which we are refusing to pull message: A custom error message that gives more context + retry_after_ms: The remaining backoff interval, in milliseconds """ - def __init__(self, event_ids: List[str], message: Optional[str]): - self.event_ids = event_ids + def __init__( + self, event_ids: "StrCollection", message: Optional[str], retry_after_ms: int + ): + event_ids = list(event_ids) if message: error_message = message else: - error_message = f"Not attempting to pull event_ids={self.event_ids} because we already tried to pull them recently (backing off)." + error_message = ( + f"Not attempting to pull event_ids={event_ids} because we already " + "tried to pull them recently (backing off)." + ) super().__init__(error_message) + self.event_ids = event_ids + self.retry_after_ms = retry_after_ms + class HttpResponseException(CodeMessageException): """ diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py index c397920fe54e..5d9c13e3c3fc 100644 --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -78,7 +78,10 @@ class RoomVersion: # MSC2209: Check 'notifications' key while verifying # m.room.power_levels auth rules. limit_notifications_power_levels: bool - # MSC2174/MSC2176: Apply updated redaction rules algorithm. + # MSC2175: No longer include the creator in m.room.create events. + msc2175_implicit_room_creator: bool + # MSC2174/MSC2176: Apply updated redaction rules algorithm, move redacts to + # content property. msc2176_redaction_rules: bool # MSC3083: Support the 'restricted' join_rule. msc3083_join_rules: bool @@ -104,6 +107,8 @@ class RoomVersion: # support the flag. Unknown flags are ignored by the evaluator, making conditions # fail if used. msc3931_push_features: Tuple[str, ...] # values from PushRuleRoomFlag + # MSC3989: Redact the origin field. 
+ msc3989_redaction_rules: bool class RoomVersions: @@ -116,6 +121,7 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -125,6 +131,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V2 = RoomVersion( "2", @@ -135,6 +142,7 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -144,6 +152,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V3 = RoomVersion( "3", @@ -154,6 +163,7 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -163,6 +173,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V4 = RoomVersion( "4", @@ -173,6 +184,7 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -182,6 +194,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V5 = RoomVersion( "5", @@ -192,6 +205,7 @@ class RoomVersions: special_case_aliases_auth=True, strict_canonicaljson=False, limit_notifications_power_levels=False, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -201,6 +215,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V6 = RoomVersion( "6", @@ -211,6 +226,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -220,6 +236,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC2176 = RoomVersion( "org.matrix.msc2176", @@ -230,6 +247,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=True, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -239,6 +257,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V7 = RoomVersion( "7", @@ -249,6 +268,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, 
msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -258,6 +278,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V8 = RoomVersion( "8", @@ -268,6 +289,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=False, @@ -277,6 +299,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V9 = RoomVersion( "9", @@ -287,6 +310,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, @@ -296,6 +320,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC3787 = RoomVersion( "org.matrix.msc3787", @@ -306,6 +331,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, @@ -315,6 +341,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) V10 = RoomVersion( "10", @@ -325,6 +352,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, @@ -334,6 +362,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC2716v4 = RoomVersion( "org.matrix.msc2716v4", @@ -344,6 +373,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=False, msc3375_redaction_rules=False, @@ -353,6 +383,7 @@ class RoomVersions: msc3787_knock_restricted_join_rule=False, msc3667_int_only_power_levels=False, msc3931_push_features=(), + msc3989_redaction_rules=False, ) MSC1767v10 = RoomVersion( # MSC1767 (Extensible Events) based on room version "10" @@ -364,6 +395,7 @@ class RoomVersions: special_case_aliases_auth=False, strict_canonicaljson=True, limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, msc2176_redaction_rules=False, msc3083_join_rules=True, msc3375_redaction_rules=True, @@ -373,6 +405,28 @@ class RoomVersions: msc3787_knock_restricted_join_rule=True, msc3667_int_only_power_levels=True, msc3931_push_features=(PushRuleRoomFlag.EXTENSIBLE_EVENTS,), + msc3989_redaction_rules=False, + ) + MSC3989 = RoomVersion( + "org.matrix.msc3989", + RoomDisposition.UNSTABLE, + EventFormatVersions.ROOM_V4_PLUS, + StateResolutionVersions.V2, + enforce_key_validity=True, + special_case_aliases_auth=False, + strict_canonicaljson=True, + limit_notifications_power_levels=True, + msc2175_implicit_room_creator=False, + 
msc2176_redaction_rules=False, + msc3083_join_rules=True, + msc3375_redaction_rules=True, + msc2403_knocking=True, + msc2716_historical=False, + msc2716_redactions=False, + msc3787_knock_restricted_join_rule=True, + msc3667_int_only_power_levels=True, + msc3931_push_features=(), + msc3989_redaction_rules=True, ) @@ -392,6 +446,7 @@ class RoomVersions: RoomVersions.MSC3787, RoomVersions.V10, RoomVersions.MSC2716v4, + RoomVersions.MSC3989, ) } diff --git a/synapse/app/_base.py b/synapse/app/_base.py index 28062dd69d3a..954402e4d2cc 100644 --- a/synapse/app/_base.py +++ b/synapse/app/_base.py @@ -41,7 +41,12 @@ import twisted from twisted.internet import defer, error, reactor as _reactor -from twisted.internet.interfaces import IOpenSSLContextFactory, IReactorSSL, IReactorTCP +from twisted.internet.interfaces import ( + IOpenSSLContextFactory, + IReactorSSL, + IReactorTCP, + IReactorUNIX, +) from twisted.internet.protocol import ServerFactory from twisted.internet.tcp import Port from twisted.logger import LoggingFile, LogLevel @@ -56,10 +61,9 @@ from synapse.config import ConfigError from synapse.config._base import format_config_error from synapse.config.homeserver import HomeServerConfig -from synapse.config.server import ListenerConfig, ManholeConfig +from synapse.config.server import ListenerConfig, ManholeConfig, TCPListenerConfig from synapse.crypto import context_factory from synapse.events.presence_router import load_legacy_presence_router -from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.events.third_party_rules import load_legacy_third_party_event_rules from synapse.handlers.auth import load_legacy_password_auth_providers from synapse.http.site import SynapseSite @@ -68,6 +72,7 @@ from synapse.metrics import install_gc_manager, register_threadpool from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.metrics.jemalloc import setup_jemalloc_stats +from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers from synapse.types import ISynapseReactor from synapse.util import SYNAPSE_VERSION from synapse.util.caches.lrucache import setup_expire_lru_cache_entries @@ -351,6 +356,28 @@ def listen_tcp( return r # type: ignore[return-value] +def listen_unix( + path: str, + mode: int, + factory: ServerFactory, + reactor: IReactorUNIX = reactor, + backlog: int = 50, +) -> List[Port]: + """ + Create a UNIX socket for a given path, with the given 'mode' permissions + + Returns: + list of twisted.internet.tcp.Port listening on the UNIX socket + """ + wantPID = True + + return [ + # IReactorUNIX returns an object implementing IListeningPort from listenUNIX, + # but we know it will be a Port instance.
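For readers unfamiliar with Twisted's Unix-socket support, here is a minimal, self-contained sketch of the pattern `listen_unix` wraps; the path, resource and mode are illustrative only, not Synapse's code:

```python
# A minimal sketch of serving a Twisted Site over a Unix socket.
from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site

class Health(Resource):
    isLeaf = True

    def render_GET(self, request):
        return b"OK"

# mode=0o666 mirrors the default applied in parse_listener_def(); wantPID=True
# asks Twisted to write a lockfile alongside the socket, as listen_unix() does.
reactor.listenUNIX("/tmp/demo.sock", Site(Health()), backlog=50, mode=0o666, wantPID=True)
reactor.run()
```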
+ cast(Port, reactor.listenUNIX(path, factory, backlog, mode, wantPID)) + ] + + def listen_http( listener_config: ListenerConfig, root_resource: Resource, @@ -359,18 +386,13 @@ def listen_http( context_factory: Optional[IOpenSSLContextFactory], reactor: ISynapseReactor = reactor, ) -> List[Port]: - port = listener_config.port - bind_addresses = listener_config.bind_addresses - tls = listener_config.tls - assert listener_config.http_options is not None - site_tag = listener_config.http_options.tag - if site_tag is None: - site_tag = str(port) + site_tag = listener_config.get_site_tag() site = SynapseSite( - "synapse.access.%s.%s" % ("https" if tls else "http", site_tag), + "synapse.access.%s.%s" + % ("https" if listener_config.is_tls() else "http", site_tag), site_tag, listener_config, root_resource, @@ -378,25 +400,41 @@ def listen_http( max_request_body_size=max_request_body_size, reactor=reactor, ) - if tls: - # refresh_certificate should have been called before this. - assert context_factory is not None - ports = listen_ssl( - bind_addresses, - port, - site, - context_factory, - reactor=reactor, - ) - logger.info("Synapse now listening on TCP port %d (TLS)", port) + + if isinstance(listener_config, TCPListenerConfig): + if listener_config.is_tls(): + # refresh_certificate should have been called before this. + assert context_factory is not None + ports = listen_ssl( + listener_config.bind_addresses, + listener_config.port, + site, + context_factory, + reactor=reactor, + ) + logger.info( + "Synapse now listening on TCP port %d (TLS)", listener_config.port + ) + else: + ports = listen_tcp( + listener_config.bind_addresses, + listener_config.port, + site, + reactor=reactor, + ) + logger.info("Synapse now listening on TCP port %d", listener_config.port) + else: - ports = listen_tcp( - bind_addresses, - port, - site, - reactor=reactor, + ports = listen_unix( + listener_config.path, listener_config.mode, site, reactor=reactor ) - logger.info("Synapse now listening on TCP port %d", port) + # getHost() returns a UNIXAddress which contains an instance variable of 'name' + # encoded as a byte string. Decode it as UTF-8 so it is human-readable. + logger.info( + "Synapse now listening on Unix Socket at: " + f"{ports[0].getHost().name.decode('utf-8')}" + ) + return ports diff --git a/synapse/app/generic_worker.py b/synapse/app/generic_worker.py index 3c4100bf7129..d54dcf53c1d6 100644 --- a/synapse/app/generic_worker.py +++ b/synapse/app/generic_worker.py @@ -38,7 +38,7 @@ from synapse.config._base import ConfigError from synapse.config.homeserver import HomeServerConfig from synapse.config.logger import setup_logging -from synapse.config.server import ListenerConfig +from synapse.config.server import ListenerConfig, TCPListenerConfig from synapse.federation.transport.server import TransportLayerServer from synapse.http.server import JsonResource, OptionsResource from synapse.logging.context import LoggingContext @@ -238,12 +238,18 @@ def start_listening(self) -> None: if listener.type == "http": self._listen_http(listener) elif listener.type == "manhole": - _base.listen_manhole( - listener.bind_addresses, - listener.port, - manhole_settings=self.config.server.manhole_settings, - manhole_globals={"hs": self}, - ) + if isinstance(listener, TCPListenerConfig): + _base.listen_manhole( + listener.bind_addresses, + listener.port, + manhole_settings=self.config.server.manhole_settings, + manhole_globals={"hs": self}, + ) + else: + raise ConfigError( + "Can not use a unix socket for manhole at this time."
+ ) + elif listener.type == "metrics": if not self.config.metrics.enable_metrics: logger.warning( @@ -251,10 +257,16 @@ def start_listening(self) -> None: "enable_metrics is not True!" ) else: - _base.listen_metrics( - listener.bind_addresses, - listener.port, - ) + if isinstance(listener, TCPListenerConfig): + _base.listen_metrics( + listener.bind_addresses, + listener.port, + ) + else: + raise ConfigError( + "Can not use a unix socket for metrics at this time." + ) + else: logger.warning("Unsupported listener type: %s", listener.type) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index b8830b1a9c16..84236ac299e0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -44,7 +44,7 @@ ) from synapse.config._base import ConfigError, format_config_error from synapse.config.homeserver import HomeServerConfig -from synapse.config.server import ListenerConfig +from synapse.config.server import ListenerConfig, TCPListenerConfig from synapse.federation.transport.server import TransportLayerServer from synapse.http.additional_resource import AdditionalResource from synapse.http.server import ( @@ -78,14 +78,13 @@ class SynapseHomeServer(HomeServer): DATASTORE_CLASS = DataStore # type: ignore def _listener_http( - self, config: HomeServerConfig, listener_config: ListenerConfig + self, + config: HomeServerConfig, + listener_config: ListenerConfig, ) -> Iterable[Port]: - port = listener_config.port # Must exist since this is an HTTP listener. assert listener_config.http_options is not None - site_tag = listener_config.http_options.tag - if site_tag is None: - site_tag = str(port) + site_tag = listener_config.get_site_tag() # We always include a health resource. resources: Dict[str, Resource] = {"/health": HealthResource()} @@ -252,12 +251,17 @@ def start_listening(self) -> None: self._listener_http(self.config, listener) ) elif listener.type == "manhole": - _base.listen_manhole( - listener.bind_addresses, - listener.port, - manhole_settings=self.config.server.manhole_settings, - manhole_globals={"hs": self}, - ) + if isinstance(listener, TCPListenerConfig): + _base.listen_manhole( + listener.bind_addresses, + listener.port, + manhole_settings=self.config.server.manhole_settings, + manhole_globals={"hs": self}, + ) + else: + raise ConfigError( + "Can not use a unix socket for manhole at this time." + ) elif listener.type == "metrics": if not self.config.metrics.enable_metrics: logger.warning( @@ -265,10 +269,16 @@ def start_listening(self) -> None: "enable_metrics is not True!" ) else: - _base.listen_metrics( - listener.bind_addresses, - listener.port, - ) + if isinstance(listener, TCPListenerConfig): + _base.listen_metrics( + listener.bind_addresses, + listener.port, + ) + else: + raise ConfigError( + "Can not use a unix socket for metrics at this time." 
+ ) + else: # this shouldn't happen, as the listener type should have been checked # during parsing diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 4812fb44961f..86ddb1bb289e 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -17,6 +17,8 @@ from typing import ( TYPE_CHECKING, Any, + Awaitable, + Callable, Dict, Iterable, List, @@ -24,13 +26,14 @@ Optional, Sequence, Tuple, + TypeVar, ) from prometheus_client import Counter -from typing_extensions import TypeGuard +from typing_extensions import Concatenate, ParamSpec, TypeGuard from synapse.api.constants import EventTypes, Membership, ThirdPartyEntityKind -from synapse.api.errors import CodeMessageException +from synapse.api.errors import CodeMessageException, HttpResponseException from synapse.appservice import ( ApplicationService, TransactionOneTimeKeysCount, @@ -38,7 +41,7 @@ ) from synapse.events import EventBase from synapse.events.utils import SerializeEventConfig, serialize_event -from synapse.http.client import SimpleHttpClient +from synapse.http.client import SimpleHttpClient, is_unknown_endpoint from synapse.types import DeviceListUpdates, JsonDict, ThirdPartyInstanceID from synapse.util.caches.response_cache import ResponseCache @@ -78,7 +81,11 @@ HOUR_IN_MS = 60 * 60 * 1000 -APP_SERVICE_PREFIX = "/_matrix/app/unstable" +APP_SERVICE_PREFIX = "/_matrix/app/v1" +APP_SERVICE_UNSTABLE_PREFIX = "/_matrix/app/unstable" + +P = ParamSpec("P") +R = TypeVar("R") def _is_valid_3pe_metadata(info: JsonDict) -> bool: @@ -121,6 +128,47 @@ def __init__(self, hs: "HomeServer"): hs.get_clock(), "as_protocol_meta", timeout_ms=HOUR_IN_MS ) + async def _send_with_fallbacks( + self, + service: "ApplicationService", + prefixes: List[str], + path: str, + func: Callable[Concatenate[str, P], Awaitable[R]], + *args: P.args, + **kwargs: P.kwargs, + ) -> R: + """ + Attempt to call an application service with multiple paths, falling back + until one succeeds. + + Args: + service: The application service; this provides the base URL. + prefixes: A list of path prefixes to try in order for the requests. + path: A suffix to append to each prefix. + func: The function to call; the first argument will be the full + endpoint to fetch. Other arguments are provided by args/kwargs. + + Returns: + The return value of func. + """ + for i, prefix in enumerate(prefixes, start=1): + uri = f"{service.url}{prefix}{path}" + try: + return await func(uri, *args, **kwargs) + except HttpResponseException as e: + # If an error is received that is due to an unrecognised path, + # fall back to the next path (if one exists). Otherwise, consider it + # a legitimate error and raise. + if i < len(prefixes) and is_unknown_endpoint(e): + continue + raise + except Exception: + # Unexpected exceptions get sent to the caller. + raise + + # The function should always exit via the return or raise above this. + raise RuntimeError("Unexpected fallback behaviour. This should never be seen.") + async def query_user(self, service: "ApplicationService", user_id: str) -> bool: if service.url is None: return False @@ -128,10 +176,12 @@ async def query_user(self, service: "ApplicationService", user_id: str) -> bool: # This is required by the configuration.
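The control flow of `_send_with_fallbacks` is easier to see stripped of Synapse's types. A hedged sketch, with `UnknownEndpointError` standing in for an `HttpResponseException` that `is_unknown_endpoint` matches:

```python
from typing import Awaitable, Callable, List, TypeVar

R = TypeVar("R")

class UnknownEndpointError(Exception):
    """Stand-in for a 404/405 (or M_UNRECOGNIZED) response."""

async def call_with_fallbacks(
    base_url: str, prefixes: List[str], path: str, func: Callable[[str], Awaitable[R]]
) -> R:
    for i, prefix in enumerate(prefixes, start=1):
        try:
            return await func(f"{base_url}{prefix}{path}")
        except UnknownEndpointError:
            if i < len(prefixes):
                continue  # the server doesn't know this prefix: try the next one
            raise  # no prefixes left, so surface the error
    raise RuntimeError("unreachable: the loop always returns or raises")
```

Trying the stable `/_matrix/app/v1` prefix first and falling back keeps compatibility with appservices that only implement the unstable (or unprefixed) paths.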
assert service.hs_token is not None - uri = service.url + ("/users/%s" % urllib.parse.quote(user_id)) try: - response = await self.get_json( - uri, + response = await self._send_with_fallbacks( + service, + [APP_SERVICE_PREFIX, ""], + f"/users/{urllib.parse.quote(user_id)}", + self.get_json, {"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -140,9 +190,9 @@ async def query_user(self, service: "ApplicationService", user_id: str) -> bool: except CodeMessageException as e: if e.code == 404: return False - logger.warning("query_user to %s received %s", uri, e.code) + logger.warning("query_user to %s received %s", service.url, e.code) except Exception as ex: - logger.warning("query_user to %s threw exception %s", uri, ex) + logger.warning("query_user to %s threw exception %s", service.url, ex) return False async def query_alias(self, service: "ApplicationService", alias: str) -> bool: @@ -152,21 +202,23 @@ async def query_alias(self, service: "ApplicationService", alias: str) -> bool: # This is required by the configuration. assert service.hs_token is not None - uri = service.url + ("/rooms/%s" % urllib.parse.quote(alias)) try: - response = await self.get_json( - uri, + response = await self._send_with_fallbacks( + service, + [APP_SERVICE_PREFIX, ""], + f"/rooms/{urllib.parse.quote(alias)}", + self.get_json, {"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if response is not None: # just an empty json object return True except CodeMessageException as e: - logger.warning("query_alias to %s received %s", uri, e.code) + logger.warning("query_alias to %s received %s", service.url, e.code) if e.code == 404: return False except Exception as ex: - logger.warning("query_alias to %s threw exception %s", uri, ex) + logger.warning("query_alias to %s threw exception %s", service.url, ex) return False async def query_3pe( @@ -188,25 +240,24 @@ async def query_3pe( # This is required by the configuration. assert service.hs_token is not None - uri = "%s%s/thirdparty/%s/%s" % ( - service.url, - APP_SERVICE_PREFIX, - kind, - urllib.parse.quote(protocol), - ) try: args: Mapping[Any, Any] = { **fields, b"access_token": service.hs_token, } - response = await self.get_json( - uri, + response = await self._send_with_fallbacks( + service, + [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX], + f"/thirdparty/{kind}/{urllib.parse.quote(protocol)}", + self.get_json, args=args, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if not isinstance(response, list): logger.warning( - "query_3pe to %s returned an invalid response %r", uri, response + "query_3pe to %s returned an invalid response %r", + service.url, + response, ) return [] @@ -216,12 +267,12 @@ async def query_3pe( ret.append(r) else: logger.warning( - "query_3pe to %s returned an invalid result %r", uri, r + "query_3pe to %s returned an invalid result %r", service.url, r ) return ret except Exception as ex: - logger.warning("query_3pe to %s threw exception %s", uri, ex) + logger.warning("query_3pe to %s threw exception %s", service.url, ex) return [] async def get_3pe_protocol( @@ -233,21 +284,20 @@ async def get_3pe_protocol( async def _get() -> Optional[JsonDict]: # This is required by the configuration. 
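Note that each of these requests authenticates twice over: the `hs_token` is sent both as a legacy `access_token` query parameter and as a `Bearer` header. A tiny illustrative helper (`as_auth` is hypothetical, not Synapse's API):

```python
from typing import Dict, List, Tuple

def as_auth(hs_token: str) -> Tuple[Dict[str, str], Dict[str, List[str]]]:
    query_args = {"access_token": hs_token}              # legacy query-string auth
    headers = {"Authorization": [f"Bearer {hs_token}"]}  # header-based auth
    return query_args, headers
```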
assert service.hs_token is not None - uri = "%s%s/thirdparty/protocol/%s" % ( - service.url, - APP_SERVICE_PREFIX, - urllib.parse.quote(protocol), - ) try: - info = await self.get_json( - uri, + info = await self._send_with_fallbacks( + service, + [APP_SERVICE_PREFIX, APP_SERVICE_UNSTABLE_PREFIX], + f"/thirdparty/protocol/{urllib.parse.quote(protocol)}", + self.get_json, {"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) if not _is_valid_3pe_metadata(info): logger.warning( - "query_3pe_protocol to %s did not return a valid result", uri + "query_3pe_protocol to %s did not return a valid result", + service.url, ) return None @@ -260,7 +310,9 @@ async def _get() -> Optional[JsonDict]: return info except Exception as ex: - logger.warning("query_3pe_protocol to %s threw exception %s", uri, ex) + logger.warning( + "query_3pe_protocol to %s threw exception %s", service.url, ex + ) return None key = (service.id, protocol) @@ -274,7 +326,7 @@ async def ping(self, service: "ApplicationService", txn_id: Optional[str]) -> No assert service.hs_token is not None await self.post_json_get_json( - uri=service.url + "/_matrix/app/unstable/fi.mau.msc2659/ping", + uri=f"{service.url}{APP_SERVICE_UNSTABLE_PREFIX}/fi.mau.msc2659/ping", post_json={"transaction_id": txn_id}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, ) @@ -318,8 +370,6 @@ async def push_bulk( ) txn_id = 0 - uri = service.url + ("/transactions/%s" % urllib.parse.quote(str(txn_id))) - # Never send ephemeral events to appservices that do not support it body: JsonDict = {"events": serialized_events} if service.supports_ephemeral: @@ -351,8 +401,11 @@ async def push_bulk( } try: - await self.put_json( - uri=uri, + await self._send_with_fallbacks( + service, + [APP_SERVICE_PREFIX, ""], + f"/transactions/{urllib.parse.quote(str(txn_id))}", + self.put_json, json_body=body, args={"access_token": service.hs_token}, headers={"Authorization": [f"Bearer {service.hs_token}"]}, @@ -360,7 +413,7 @@ async def push_bulk( if logger.isEnabledFor(logging.DEBUG): logger.debug( "push_bulk to %s succeeded! events=%s", - uri, + service.url, [event.get("event_id") for event in events], ) sent_transactions_counter.labels(service.id).inc() @@ -371,7 +424,7 @@ async def push_bulk( except CodeMessageException as e: logger.warning( "push_bulk to %s received code=%s msg=%s", - uri, + service.url, e.code, e.msg, exc_info=logger.isEnabledFor(logging.DEBUG), @@ -379,7 +432,7 @@ async def push_bulk( except Exception as ex: logger.warning( "push_bulk to %s threw exception(%s) %s args=%s", - uri, + service.url, type(ex).__name__, ex, ex.args, @@ -388,6 +441,108 @@ async def push_bulk( failed_transactions_counter.labels(service.id).inc() return False + async def claim_client_keys( + self, service: "ApplicationService", query: List[Tuple[str, str, str]] + ) -> Tuple[Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str]]]: + """Claim one time keys from an application service. + + Note that any error (including a timeout) is treated as the application + service having no information. + + Args: + service: The application service to query. + query: An iterable of tuples of (user ID, device ID, algorithm). + + Returns: + A tuple of: + A map of user ID -> a map device ID -> a map of key ID -> JSON dict. + + A copy of the input which has not been fulfilled because the + appservice doesn't support this endpoint or has not returned + data for that tuple. 
+ """ + if service.url is None: + return {}, query + + # This is required by the configuration. + assert service.hs_token is not None + + # Create the expected payload shape. + body: Dict[str, Dict[str, List[str]]] = {} + for user_id, device, algorithm in query: + body.setdefault(user_id, {}).setdefault(device, []).append(algorithm) + + uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3983/keys/claim" + try: + response = await self.post_json_get_json( + uri, + body, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, + ) + except HttpResponseException as e: + # The appservice doesn't support this endpoint. + if is_unknown_endpoint(e): + return {}, query + logger.warning("claim_keys to %s received %s", uri, e.code) + return {}, query + except Exception as ex: + logger.warning("claim_keys to %s threw exception %s", uri, ex) + return {}, query + + # Check if the appservice fulfilled all of the queried user/device/algorithms + # or if some are still missing. + # + # TODO This places a lot of faith in the response shape being correct. + missing = [ + (user_id, device, algorithm) + for user_id, device, algorithm in query + if algorithm not in response.get(user_id, {}).get(device, []) + ] + + return response, missing + + async def query_keys( + self, service: "ApplicationService", query: Dict[str, List[str]] + ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + """Query the application service for keys. + + Note that any error (including a timeout) is treated as the application + service having no information. + + Args: + service: The application service to query. + query: An iterable of tuples of (user ID, device ID, algorithm). + + Returns: + A map of device_keys/master_keys/self_signing_keys/user_signing_keys: + + device_keys is a map of user ID -> a map device ID -> device info. + """ + if service.url is None: + return {} + + # This is required by the configuration. + assert service.hs_token is not None + + uri = f"{service.url}/_matrix/app/unstable/org.matrix.msc3984/keys/query" + try: + response = await self.post_json_get_json( + uri, + query, + headers={"Authorization": [f"Bearer {service.hs_token}"]}, + ) + except HttpResponseException as e: + # The appservice doesn't support this endpoint. + if is_unknown_endpoint(e): + return {} + logger.warning("query_keys to %s received %s", uri, e.code) + return {} + except Exception as ex: + logger.warning("query_keys to %s threw exception %s", uri, ex) + return {} + + return response + def _serialize( self, service: "ApplicationService", events: Iterable[EventBase] ) -> List[JsonDict]: diff --git a/synapse/config/_util.py b/synapse/config/_util.py index d3a4b484abb8..dfc5d1221063 100644 --- a/synapse/config/_util.py +++ b/synapse/config/_util.py @@ -11,9 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Any, Iterable +from typing import Any, Dict, Iterable, Type, TypeVar import jsonschema +from pydantic import BaseModel, ValidationError, parse_obj_as from synapse.config._base import ConfigError from synapse.types import JsonDict @@ -64,3 +65,28 @@ def json_error_to_config_error( else: path.append(str(p)) return ConfigError(e.message, path) + + +Model = TypeVar("Model", bound=BaseModel) + + +def parse_and_validate_mapping( + config: Any, + model_type: Type[Model], +) -> Dict[str, Model]: + """Parse `config` as a mapping from strings to a given `Model` type. 
+ Args: + config: The configuration data to check. + model_type: The BaseModel to validate and parse against. + Returns: + Fully validated and parsed Dict[str, Model]. + Raises: + ConfigError, if given improper input. + """ + try: + # type-ignore: mypy doesn't like constructing `Dict[str, model_type]` because + # `model_type` is a runtime variable. Pydantic is fine with this. + instances = parse_obj_as(Dict[str, model_type], config) # type: ignore[valid-type] + except ValidationError as e: + raise ConfigError(str(e)) from e + return instances diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 00182090b2ea..fd89960e7261 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -33,6 +33,16 @@ class AppServiceConfig(Config): def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.app_service_config_files = config.get("app_service_config_files", []) + if not isinstance(self.app_service_config_files, list) or not all( + type(x) is str for x in self.app_service_config_files + ): + # read_config receives an arbitrary JSON value, so this path is reachable. + raise ConfigError( + "Expected '%s' to be a list of AS config files:" + % (self.app_service_config_files), + "app_service_config_files", + ) + self.track_appservice_user_ips = config.get("track_appservice_user_ips", False) @@ -40,10 +50,6 @@ def load_appservices( hostname: str, config_files: List[str] ) -> List[ApplicationService]: """Returns a list of Application Services from the config files.""" - if not isinstance(config_files, list): - # type-ignore: this function gets arbitrary json value; we do use this path. - logger.warning("Expected %s to be a list of AS config files.", config_files) # type: ignore[unreachable] - return [] # Dicts of value -> filename seen_as_tokens: Dict[str, str] = {} diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index e9442b17bd7b..d616b3dcfd5c 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -78,6 +78,16 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: "msc3202_transaction_extensions", False ) + # MSC3983: Proxying OTK claim requests to exclusive ASes. + self.msc3983_appservice_otk_claims: bool = experimental.get( + "msc3983_appservice_otk_claims", False + ) + + # MSC3984: Proxying key queries to exclusive ASes. + self.msc3984_appservice_key_query: bool = experimental.get( + "msc3984_appservice_key_query", False + ) + # MSC3706 (server-side support for partial state in /send_join responses) # Synapse will always serve partial state responses to requests using the stable # query parameter `omit_members`.
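A quick sketch of what `parse_and_validate_mapping` does with pydantic v1's `parse_obj_as`; the `Endpoint` model and the sample data are hypothetical:

```python
from typing import Dict
from pydantic import BaseModel, StrictInt, StrictStr, ValidationError, parse_obj_as

class Endpoint(BaseModel):  # hypothetical stand-in for a ConfigModel subclass
    host: StrictStr
    port: StrictInt

raw = {"worker1": {"host": "localhost", "port": 8034}}
try:
    parsed: Dict[str, Endpoint] = parse_obj_as(Dict[str, Endpoint], raw)
except ValidationError as e:
    raise SystemExit(f"invalid mapping: {e}")

assert parsed["worker1"].port == 8034
```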
If this flag is set, Synapse will also serve diff --git a/synapse/config/oidc.py b/synapse/config/oidc.py index df8c42204392..77c1d1dc8e0f 100644 --- a/synapse/config/oidc.py +++ b/synapse/config/oidc.py @@ -136,6 +136,7 @@ def oidc_enabled(self) -> bool: "type": "array", "items": SsoAttributeRequirement.JSON_SCHEMA, }, + "enable_registration": {"type": "boolean"}, }, } @@ -306,6 +307,7 @@ def _parse_oidc_config_dict( user_mapping_provider_class=user_mapping_provider_class, user_mapping_provider_config=user_mapping_provider_config, attribute_requirements=attribute_requirements, + enable_registration=oidc_config.get("enable_registration", True), ) @@ -405,3 +407,6 @@ class OidcProviderConfig: # attributes to require in userinfo to allow login/registration attribute_requirements: List[SsoAttributeRequirement] + + # Whether automatic registrations are enabled in the OIDC flow. Defaults to True. + enable_registration: bool diff --git a/synapse/config/server.py b/synapse/config/server.py index 0e46b849cf06..386c3194b85a 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -214,17 +214,52 @@ class HttpListenerConfig: @attr.s(slots=True, frozen=True, auto_attribs=True) -class ListenerConfig: - """Object describing the configuration of a single listener.""" +class TCPListenerConfig: + """Object describing the configuration of a single TCP listener.""" port: int = attr.ib(validator=attr.validators.instance_of(int)) - bind_addresses: List[str] + bind_addresses: List[str] = attr.ib(validator=attr.validators.instance_of(List)) type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES)) tls: bool = False # http_options is only populated if type=http http_options: Optional[HttpListenerConfig] = None + def get_site_tag(self) -> str: + """Retrieves http_options.tag if it exists, otherwise the port number.""" + if self.http_options and self.http_options.tag is not None: + return self.http_options.tag + else: + return str(self.port) + + def is_tls(self) -> bool: + return self.tls + + +@attr.s(slots=True, frozen=True, auto_attribs=True) +class UnixListenerConfig: + """Object describing the configuration of a single Unix socket listener.""" + + # Note: unix sockets can not be tls encrypted, so they HAVE to be behind a + # tls-handling reverse proxy + path: str = attr.ib() + # A default (0o666) for this is set in parse_listener_def() below + mode: int + type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES)) + + # http_options is only populated if type=http + http_options: Optional[HttpListenerConfig] = None + + def get_site_tag(self) -> str: + return "unix" + + def is_tls(self) -> bool: + """Unix sockets can't have TLS""" + return False + + +ListenerConfig = Union[TCPListenerConfig, UnixListenerConfig] + @attr.s(slots=True, frozen=True, auto_attribs=True) class ManholeConfig: @@ -531,12 +566,12 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)] - # no_tls is not really supported any more, but let's grandfather it in - # here. + # no_tls is not really supported anymore, but let's grandfather it in here.
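The listener split above boils down to two frozen attrs classes behind a `Union`, each exposing the same `get_site_tag()`/`is_tls()` surface so call sites rarely need `isinstance` checks. A simplified sketch (fields trimmed relative to the real classes):

```python
from typing import Optional, Union
import attr

@attr.s(slots=True, frozen=True, auto_attribs=True)
class TCPListener:
    port: int
    tls: bool = False
    tag: Optional[str] = None

    def get_site_tag(self) -> str:
        return self.tag if self.tag is not None else str(self.port)

    def is_tls(self) -> bool:
        return self.tls

@attr.s(slots=True, frozen=True, auto_attribs=True)
class UnixListener:
    path: str
    mode: int = 0o666

    def get_site_tag(self) -> str:
        return "unix"

    def is_tls(self) -> bool:
        return False  # TLS must be terminated by a reverse proxy

Listener = Union[TCPListener, UnixListener]

def describe(listener: Listener) -> str:
    return f"{listener.get_site_tag()} (tls={listener.is_tls()})"
```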
if config.get("no_tls", False): l2 = [] for listener in self.listeners: - if listener.tls: + if isinstance(listener, TCPListenerConfig) and listener.tls: + # Use isinstance() as the assertion this *has* a listener.port logger.info( "Ignoring TLS-enabled listener on port %i due to no_tls", listener.port, @@ -577,7 +612,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: ) self.listeners.append( - ListenerConfig( + TCPListenerConfig( port=bind_port, bind_addresses=[bind_host], tls=True, @@ -589,7 +624,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: unsecure_port = config.get("unsecure_port", bind_port - 400) if unsecure_port: self.listeners.append( - ListenerConfig( + TCPListenerConfig( port=unsecure_port, bind_addresses=[bind_host], tls=False, @@ -601,7 +636,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: manhole = config.get("manhole") if manhole: self.listeners.append( - ListenerConfig( + TCPListenerConfig( port=manhole, bind_addresses=["127.0.0.1"], type="manhole", @@ -648,7 +683,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: logger.warning(METRICS_PORT_WARNING) self.listeners.append( - ListenerConfig( + TCPListenerConfig( port=metrics_port, bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")], type="http", @@ -724,7 +759,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.delete_stale_devices_after = None def has_tls_listener(self) -> bool: - return any(listener.tls for listener in self.listeners) + return any(listener.is_tls() for listener in self.listeners) def generate_config_section( self, @@ -904,25 +939,25 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: raise ConfigError(DIRECT_TCP_ERROR, ("listeners", str(num), "type")) port = listener.get("port") - if type(port) is not int: + socket_path = listener.get("path") + # Either a port or a path should be declared at a minimum. Using both would be bad. + if port is not None and not isinstance(port, int): raise ConfigError("Listener configuration is lacking a valid 'port' option") + if socket_path is not None and not isinstance(socket_path, str): + raise ConfigError("Listener configuration is lacking a valid 'path' option") + if port and socket_path: + raise ConfigError( + "Can not have both a UNIX socket and an IP/port declared for the same " + "resource!" + ) + if port is None and socket_path is None: + raise ConfigError( + "Must have either a UNIX socket or an IP/port declared for a given " + "resource!" + ) tls = listener.get("tls", False) - bind_addresses = listener.get("bind_addresses", []) - bind_address = listener.get("bind_address") - # if bind_address was specified, add it to the list of addresses - if bind_address: - bind_addresses.append(bind_address) - - # if we still have an empty list of addresses, use the default list - if not bind_addresses: - if listener_type == "metrics": - # the metrics listener doesn't support IPv6 - bind_addresses.append("0.0.0.0") - else: - bind_addresses.extend(DEFAULT_BIND_ADDRESSES) - http_config = None if listener_type == "http": try: @@ -932,8 +967,12 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: except ValueError as e: raise ConfigError("Unknown listener resource") from e + # For a unix socket, default x_forwarded to True, as this is the only way of + # getting a client IP. + # Note: a reverse proxy is required anyway, as there is no way of exposing a + # unix socket to the internet. 
http_config = HttpListenerConfig( - x_forwarded=listener.get("x_forwarded", False), + x_forwarded=listener.get("x_forwarded", (True if socket_path else False)), resources=resources, additional_resources=listener.get("additional_resources", {}), tag=listener.get("tag"), @@ -941,7 +980,30 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False), ) - return ListenerConfig(port, bind_addresses, listener_type, tls, http_config) + if socket_path: + # TODO: Add in path validation, like if the directory exists and is writable? + # Set a default for the permission, in case it's left out + socket_mode = listener.get("mode", 0o666) + + return UnixListenerConfig(socket_path, socket_mode, listener_type, http_config) + + else: + assert port is not None + bind_addresses = listener.get("bind_addresses", []) + bind_address = listener.get("bind_address") + # if bind_address was specified, add it to the list of addresses + if bind_address: + bind_addresses.append(bind_address) + + # if we still have an empty list of addresses, use the default list + if not bind_addresses: + if listener_type == "metrics": + # the metrics listener doesn't support IPv6 + bind_addresses.append("0.0.0.0") + else: + bind_addresses.extend(DEFAULT_BIND_ADDRESSES) + + return TCPListenerConfig(port, bind_addresses, listener_type, tls, http_config) _MANHOLE_SETTINGS_SCHEMA = { diff --git a/synapse/config/workers.py b/synapse/config/workers.py index 2580660b6c27..95b4047f1d3f 100644 --- a/synapse/config/workers.py +++ b/synapse/config/workers.py @@ -18,16 +18,21 @@ from typing import Any, Dict, List, Union import attr +from pydantic import BaseModel, Extra, StrictBool, StrictInt, StrictStr -from synapse.types import JsonDict - -from ._base import ( +from synapse.config._base import ( Config, ConfigError, RoutableShardedWorkerHandlingConfig, ShardedWorkerHandlingConfig, ) -from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def +from synapse.config._util import parse_and_validate_mapping +from synapse.config.server import ( + DIRECT_TCP_ERROR, + TCPListenerConfig, + parse_listener_def, +) +from synapse.types import JsonDict _DEPRECATED_WORKER_DUTY_OPTION_USED = """ The '%s' configuration option is deprecated and will be removed in a future @@ -47,13 +52,43 @@ def _instance_to_list_converter(obj: Union[str, List[str]]) -> List[str]: return obj -@attr.s(auto_attribs=True) -class InstanceLocationConfig: +class ConfigModel(BaseModel): + """A custom version of Pydantic's BaseModel which + + - ignores unknown fields and + - does not allow fields to be overwritten after construction, + + but otherwise uses Pydantic's default behaviour. + + For now, ignore unknown fields. In the future, we could change this so that unknown + config values cause a ValidationError, provided the error messages are meaningful to + server operators. + + Subclassing in this way is recommended by + https://pydantic-docs.helpmanual.io/usage/model_config/#change-behaviour-globally + """ + + class Config: + # By default, ignore fields that we don't recognise. + extra = Extra.ignore + # By default, don't allow fields to be reassigned after parsing. 
+ allow_mutation = False + + +class InstanceLocationConfig(ConfigModel): """The host and port to talk to an instance via HTTP replication.""" - host: str - port: int - tls: bool = False + host: StrictStr + port: StrictInt + tls: StrictBool = False + + def scheme(self) -> str: + """Return the scheme to use, based on self.tls""" + return "https" if self.tls else "http" + + def netloc(self) -> str: + """Nicely format the network location data""" + return f"{self.host}:{self.port}" @attr.s @@ -161,7 +196,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: manhole = config.get("worker_manhole") if manhole: self.worker_listeners.append( - ListenerConfig( + TCPListenerConfig( port=manhole, bind_addresses=["127.0.0.1"], type="manhole", @@ -180,10 +215,12 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: ) # A map from instance name to host/port of their HTTP replication endpoint. - instance_map = config.get("instance_map") or {} - self.instance_map = { - name: InstanceLocationConfig(**c) for name, c in instance_map.items() - } + self.instance_map: Dict[ + str, InstanceLocationConfig + ] = parse_and_validate_mapping( + config.get("instance_map", {}), + InstanceLocationConfig, + ) # Map from type of streams to source, c.f. WriterLocations. writers = config.get("stream_writers") or {} diff --git a/synapse/crypto/event_signing.py b/synapse/crypto/event_signing.py index 23b799ac3250..1a293f1df008 100644 --- a/synapse/crypto/event_signing.py +++ b/synapse/crypto/event_signing.py @@ -51,7 +51,7 @@ def check_event_content_hash( # some malformed events lack a 'hashes'. Protect against it being missing # or a weird type by basically treating it the same as an unhashed event. hashes = event.get("hashes") - # nb it might be a frozendict or a dict + # nb it might be an immutabledict or a dict if not isinstance(hashes, collections.abc.Mapping): raise SynapseError( 400, "Malformed 'hashes': %s" % (type(hashes),), Codes.UNAUTHORIZED diff --git a/synapse/crypto/keyring.py b/synapse/crypto/keyring.py index d710607c6367..d2f99dc2acd1 100644 --- a/synapse/crypto/keyring.py +++ b/synapse/crypto/keyring.py @@ -721,7 +721,7 @@ async def get_server_verify_key_v2_indirect( ) keys: Dict[str, Dict[str, FetchKeyResult]] = {} - added_keys: List[Tuple[str, str, FetchKeyResult]] = [] + added_keys: Dict[Tuple[str, str], FetchKeyResult] = {} time_now_ms = self.clock.time_msec() @@ -752,9 +752,27 @@ async def get_server_verify_key_v2_indirect( # we continue to process the rest of the response continue - added_keys.extend( - (server_name, key_id, key) for key_id, key in processed_response.items() - ) + for key_id, key in processed_response.items(): + dict_key = (server_name, key_id) + if dict_key in added_keys: + already_present_key = added_keys[dict_key] + logger.warning( + "Duplicate server keys for %s (%s) from perspective %s (%r, %r)", + server_name, + key_id, + perspective_name, + already_present_key, + key, + ) + + if already_present_key.valid_until_ts > key.valid_until_ts: + # Favour the entry with the largest valid_until_ts, + # as `old_verify_keys` are also collected from this + # response.
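In isolation, the deduplication rule in this hunk (keep the key whose `valid_until_ts` is larger) reduces to the following sketch, with `FetchKeyResult` cut down to a stand-in:

```python
from typing import Dict, Iterable, NamedTuple, Tuple

class FetchKeyResult(NamedTuple):  # simplified stand-in for Synapse's class
    verify_key: str
    valid_until_ts: int

def dedup_keys(
    entries: Iterable[Tuple[str, str, FetchKeyResult]]
) -> Dict[Tuple[str, str], FetchKeyResult]:
    added: Dict[Tuple[str, str], FetchKeyResult] = {}
    for server_name, key_id, key in entries:
        prev = added.get((server_name, key_id))
        if prev is not None and prev.valid_until_ts > key.valid_until_ts:
            continue  # favour the entry that remains valid for longer
        added[(server_name, key_id)] = key
    return added
```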
+ continue + + added_keys[dict_key] = key + keys.setdefault(server_name, {}).update(processed_response) await self.store.store_server_verify_keys( diff --git a/synapse/event_auth.py b/synapse/event_auth.py index af55874b5c47..25898b95a570 100644 --- a/synapse/event_auth.py +++ b/synapse/event_auth.py @@ -455,8 +455,11 @@ def _check_create(event: "EventBase") -> None: "room appears to have unsupported version %s" % (room_version_prop,), ) - # 1.4 If content has no creator field, reject. - if EventContentFields.ROOM_CREATOR not in event.content: + # 1.4 If content has no creator field, reject if the room version requires it. + if ( + not event.room_version.msc2175_implicit_room_creator + and EventContentFields.ROOM_CREATOR not in event.content + ): raise AuthError(403, "Create event lacks a 'creator' property") @@ -491,7 +494,11 @@ def _is_membership_change_allowed( key = (EventTypes.Create, "") create = auth_events.get(key) if create and event.prev_event_ids()[0] == create.event_id: - if create.content["creator"] == event.state_key: + if room_version.msc2175_implicit_room_creator: + creator = create.sender + else: + creator = create.content[EventContentFields.ROOM_CREATOR] + if creator == event.state_key: return target_user_id = event.state_key @@ -786,7 +793,7 @@ def check_redaction( """Check whether the event sender is allowed to redact the target event. Returns: - True if the the sender is allowed to redact the target event if the + True if the sender is allowed to redact the target event if the target event was created by them. False if the sender is allowed to redact the target event with no further checks. @@ -1004,10 +1011,14 @@ def get_user_power_level(user_id: str, auth_events: StateMap["EventBase"]) -> in # that. key = (EventTypes.Create, "") create_event = auth_events.get(key) - if create_event is not None and create_event.content["creator"] == user_id: - return 100 - else: - return 0 + if create_event is not None: + if create_event.room_version.msc2175_implicit_room_creator: + creator = create_event.sender + else: + creator = create_event.content[EventContentFields.ROOM_CREATOR] + if creator == user_id: + return 100 + return 0 def get_named_level(auth_events: StateMap["EventBase"], name: str, default: int) -> int: diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 91118a8d847f..4501518cf0e5 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -326,7 +326,6 @@ def __init__( hashes: DictProperty[Dict[str, str]] = DictProperty("hashes") origin: DictProperty[str] = DictProperty("origin") origin_server_ts: DictProperty[int] = DictProperty("origin_server_ts") - redacts: DefaultDictProperty[Optional[str]] = DefaultDictProperty("redacts", None) room_id: DictProperty[str] = DictProperty("room_id") sender: DictProperty[str] = DictProperty("sender") # TODO state_key should be Optional[str]. 
This is generally asserted in Synapse @@ -346,6 +345,13 @@ def event_id(self) -> str: def membership(self) -> str: return self.content["membership"] + @property + def redacts(self) -> Optional[str]: + """MSC2176 moved the redacts field into the content.""" + if self.room_version.msc2176_redaction_rules: + return self.content.get("redacts") + return self.get("redacts") + def is_state(self) -> bool: return self.get_state_key() is not None @@ -462,7 +468,7 @@ def __init__( # Signatures is a dict of dicts, and this is faster than doing a # copy.deepcopy signatures = { - name: {sig_id: sig for sig_id, sig in sigs.items()} + name: dict(sigs.items()) for name, sigs in event_dict.pop("signatures", {}).items() } @@ -510,7 +516,7 @@ def __init__( # Signatures is a dict of dicts, and this is faster than doing a # copy.deepcopy signatures = { - name: {sig_id: sig for sig_id, sig in sigs.items()} + name: dict(sigs.items()) for name, sigs in event_dict.pop("signatures", {}).items() } diff --git a/synapse/events/builder.py b/synapse/events/builder.py index c82745275f94..a254548c6c76 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -173,7 +173,9 @@ async def build( if self.is_state(): event_dict["state_key"] = self._state_key - if self._redacts is not None: + # MSC2174 moves the redacts property to the content, it is invalid to + # provide it as a top-level property. + if self._redacts is not None and not self.room_version.msc2176_redaction_rules: event_dict["redacts"] = self._redacts if self._origin_server_ts is not None: diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index c04ad08cbb61..9b4d692cf478 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -15,7 +15,7 @@ from typing import TYPE_CHECKING, List, Optional, Tuple import attr -from frozendict import frozendict +from immutabledict import immutabledict from synapse.appservice import ApplicationService from synapse.events import EventBase @@ -489,4 +489,4 @@ def _decode_state_dict( if input is None: return None - return frozendict({(etype, state_key): v for etype, state_key, v in input}) + return immutabledict({(etype, state_key): v for etype, state_key, v in input}) diff --git a/synapse/events/utils.py b/synapse/events/utils.py index 1ff05cfdc5da..6413389a0569 100644 --- a/synapse/events/utils.py +++ b/synapse/events/utils.py @@ -106,7 +106,6 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic "depth", "prev_events", "auth_events", - "origin", "origin_server_ts", ] @@ -114,6 +113,10 @@ def prune_event_dict(room_version: RoomVersion, event_dict: JsonDict) -> JsonDic if not room_version.msc2176_redaction_rules: allowed_keys.extend(["prev_state", "membership"]) + # Room versions before MSC3989 kept the origin field. + if not room_version.msc3989_redaction_rules: + allowed_keys.append("origin") + event_type = event_dict["type"] new_content = {} @@ -355,7 +358,7 @@ def serialize_event( time_now_ms = int(time_now_ms) # Should this strip out None's? - d = {k: v for k, v in e.get_dict().items()} + d = dict(e.get_dict().items()) d["event_id"] = e.event_id @@ -580,7 +583,7 @@ def serialize_events( def copy_and_fixup_power_levels_contents( old_power_levels: PowerLevelsContent, ) -> Dict[str, Union[int, Dict[str, int]]]: - """Copy the content of a power_levels event, unfreezing frozendicts along the way. + """Copy the content of a power_levels event, unfreezing immutabledicts along the way. 
We accept as input power level values which are strings, provided they represent an integer, e.g. `"`100"` instead of 100. Such strings are converted to integers diff --git a/synapse/events/validator.py b/synapse/events/validator.py index 08d601b8cf99..d3f5fc474c1d 100644 --- a/synapse/events/validator.py +++ b/synapse/events/validator.py @@ -12,11 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. import collections.abc -from typing import Iterable, Type, Union, cast +from typing import Iterable, List, Type, Union, cast import jsonschema +from pydantic import Field, StrictBool, StrictStr -from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership +from synapse.api.constants import ( + MAX_ALIAS_LENGTH, + EventContentFields, + EventTypes, + Membership, +) from synapse.api.errors import Codes, SynapseError from synapse.api.room_versions import EventFormatVersions from synapse.config.homeserver import HomeServerConfig @@ -28,6 +34,8 @@ validate_canonicaljson, ) from synapse.federation.federation_server import server_matches_acl_event +from synapse.http.servlet import validate_json_object +from synapse.rest.models import RequestBodyModel from synapse.types import EventID, JsonDict, RoomID, UserID @@ -93,27 +101,27 @@ def validate_new(self, event: EventBase, config: HomeServerConfig) -> None: Codes.INVALID_PARAM, ) - if event.type == EventTypes.Retention: + elif event.type == EventTypes.Retention: self._validate_retention(event) - if event.type == EventTypes.ServerACL: + elif event.type == EventTypes.ServerACL: if not server_matches_acl_event(config.server.server_name, event): raise SynapseError( 400, "Can't create an ACL event that denies the local server" ) - if event.type == EventTypes.PowerLevels: + elif event.type == EventTypes.PowerLevels: try: jsonschema.validate( instance=event.content, schema=POWER_LEVELS_SCHEMA, - cls=plValidator, + cls=POWER_LEVELS_VALIDATOR, ) except jsonschema.ValidationError as e: if e.path: # example: "users_default": '0' is not of type 'integer' # cast safety: path entries can be integers, if we fail to validate - # items in an array. However the POWER_LEVELS_SCHEMA doesn't expect + # items in an array. However, the POWER_LEVELS_SCHEMA doesn't expect # to see any arrays. message = ( '"' + cast(str, e.path[-1]) + '": ' + e.message # noqa: B306 @@ -130,6 +138,15 @@ def validate_new(self, event: EventBase, config: HomeServerConfig) -> None: errcode=Codes.BAD_JSON, ) + # If the event contains a mentions key, validate it. + if ( + EventContentFields.MSC3952_MENTIONS in event.content + and config.experimental.msc3952_intentional_mentions + ): + validate_json_object( + event.content[EventContentFields.MSC3952_MENTIONS], Mentions + ) + def _validate_retention(self, event: EventBase) -> None: """Checks that an event that defines the retention policy for a room respects the format enforced by the spec. @@ -258,12 +275,17 @@ def _ensure_state_event(self, event: Union[EventBase, EventBuilder]) -> None: } +class Mentions(RequestBodyModel): + user_ids: List[StrictStr] = Field(default_factory=list) + room: StrictBool = False + + # This could return something newer than Draft 7, but that's the current "latest" # validator. 
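The hunk below generalises `_create_power_level_validator` into `_create_validator`. The underlying jsonschema trick, redefining the "object" type check so that any `Mapping` (such as an immutabledict) validates, looks like this on its own; the schema is trimmed for brevity:

```python
import collections.abc
from types import MappingProxyType  # any non-dict Mapping works for the demo

import jsonschema

SCHEMA = {"type": "object", "properties": {"ban": {"type": "integer"}}}

base = jsonschema.validators.validator_for(SCHEMA)
type_checker = base.TYPE_CHECKER.redefine(
    "object", lambda checker, thing: isinstance(thing, collections.abc.Mapping)
)
Validator = jsonschema.validators.extend(base, type_checker=type_checker)

jsonschema.validate(MappingProxyType({"ban": 50}), SCHEMA, cls=Validator)  # passes
```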
-def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]: - validator = jsonschema.validators.validator_for(POWER_LEVELS_SCHEMA) +def _create_validator(schema: JsonDict) -> Type[jsonschema.Draft7Validator]: + validator = jsonschema.validators.validator_for(schema) - # by default jsonschema does not consider a frozendict to be an object so + # by default jsonschema does not consider an immutabledict to be an object so # we need to use a custom type checker # https://python-jsonschema.readthedocs.io/en/stable/validate/?highlight=object#validating-with-additional-types type_checker = validator.TYPE_CHECKER.redefine( @@ -273,4 +295,4 @@ def _create_power_level_validator() -> Type[jsonschema.Draft7Validator]: return jsonschema.validators.extend(validator, type_checker=type_checker) -plValidator = _create_power_level_validator() +POWER_LEVELS_VALIDATOR = _create_validator(POWER_LEVELS_SCHEMA) diff --git a/synapse/federation/federation_base.py b/synapse/federation/federation_base.py index 29fae716f589..3df975958da2 100644 --- a/synapse/federation/federation_base.py +++ b/synapse/federation/federation_base.py @@ -51,7 +51,7 @@ def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname self.keyring = hs.get_keyring() - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.store = hs.get_datastores().main self._clock = hs.get_clock() self._storage_controllers = hs.get_storage_controllers() @@ -137,9 +137,9 @@ async def _check_sigs_and_hash( ) return redacted_event - spam_check = await self.spam_checker.check_event_for_spam(pdu) + spam_check = await self._spam_checker_module_callbacks.check_event_for_spam(pdu) - if spam_check != self.spam_checker.NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: logger.warning("Event contains spam, soft-failing %s", pdu.event_id) log_kv( { diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 7d04560dca78..4cf4957a42ca 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -61,6 +61,7 @@ event_from_pdu_json, ) from synapse.federation.transport.client import SendJoinResponse +from synapse.http.client import is_unknown_endpoint from synapse.http.types import QueryParams from synapse.logging.opentracing import SynapseTags, log_kv, set_tag, tag_args, trace from synapse.types import JsonDict, UserID, get_domain_from_id @@ -759,43 +760,6 @@ async def get_event_auth( return signed_auth - def _is_unknown_endpoint( - self, e: HttpResponseException, synapse_error: Optional[SynapseError] = None - ) -> bool: - """ - Returns true if the response was due to an endpoint being unimplemented. - - Args: - e: The error response received from the remote server. - synapse_error: The above error converted to a SynapseError. This is - automatically generated if not provided. - - """ - if synapse_error is None: - synapse_error = e.to_synapse_error() - # MSC3743 specifies that servers should return a 404 or 405 with an errcode - # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or - # to an unknown method, respectively. - # - # Older versions of servers don't properly handle this. This needs to be - # rather specific as some endpoints truly do return 404 errors. - return ( - # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method. - (e.code == 404 or e.code == 405) - and ( - # Older Dendrites returned a text or empty body.
- # Older Conduit returned an empty body. - not e.response - or e.response == b"404 page not found" - # The proper response JSON with M_UNRECOGNIZED errcode. - or synapse_error.errcode == Codes.UNRECOGNIZED - ) - ) or ( - # Older Synapses returned a 400 error. - e.code == 400 - and synapse_error.errcode == Codes.UNRECOGNIZED - ) - async def _try_destination_list( self, description: str, @@ -887,7 +851,7 @@ async def _try_destination_list( elif 400 <= e.code < 500 and synapse_error.errcode in failover_errcodes: failover = True - elif failover_on_unknown_endpoint and self._is_unknown_endpoint( + elif failover_on_unknown_endpoint and is_unknown_endpoint( e, synapse_error ): failover = True @@ -1223,7 +1187,7 @@ async def _do_send_join( # If an error is received that is due to an unrecognised endpoint, # fallback to the v1 endpoint. Otherwise, consider it a legitimate error # and raise. - if not self._is_unknown_endpoint(e): + if not is_unknown_endpoint(e): raise logger.debug("Couldn't send_join with the v2 API, falling back to the v1 API") @@ -1297,7 +1261,7 @@ async def _do_send_invite( # fallback to the v1 endpoint if the room uses old-style event IDs. # Otherwise, consider it a legitimate error and raise. err = e.to_synapse_error() - if self._is_unknown_endpoint(e, err): + if is_unknown_endpoint(e, err): if room_version.event_format != EventFormatVersions.ROOM_V1_V2: raise SynapseError( 400, @@ -1358,7 +1322,7 @@ async def _do_send_leave(self, destination: str, pdu: EventBase) -> JsonDict: # If an error is received that is due to an unrecognised endpoint, # fallback to the v1 endpoint. Otherwise, consider it a legitimate error # and raise. - if not self._is_unknown_endpoint(e): + if not is_unknown_endpoint(e): raise logger.debug("Couldn't send_leave with the v2 API, falling back to the v1 API") @@ -1629,7 +1593,7 @@ async def send_request( # If an error is received that is due to an unrecognised endpoint, # fallback to the unstable endpoint. Otherwise, consider it a # legitimate error and raise. 
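With the move to `synapse.http.client.is_unknown_endpoint`, the heuristic itself is unchanged; stripped of Synapse's exception types it is roughly the following (signature simplified):

```python
def is_unknown_endpoint(code: int, body: bytes, errcode: str) -> bool:
    if code in (404, 405):
        # Spec-compliant servers return M_UNRECOGNIZED (MSC3743); older
        # Dendrite/Conduit versions returned a text or empty body instead.
        return not body or body == b"404 page not found" or errcode == "M_UNRECOGNIZED"
    # Older Synapses returned a 400 with M_UNRECOGNIZED.
    return code == 400 and errcode == "M_UNRECOGNIZED"
```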
- if not self._is_unknown_endpoint(e): + if not is_unknown_endpoint(e): raise logger.debug( diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 6d99845de561..d7740eb3b448 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -86,7 +86,7 @@ from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary from synapse.storage.roommember import MemberSummary from synapse.types import JsonDict, StateMap, get_domain_from_id -from synapse.util import json_decoder, unwrapFirstError +from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache from synapse.util.stringutils import parse_server_name @@ -130,11 +130,12 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.handler = hs.get_federation_handler() - self._spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._federation_event_handler = hs.get_federation_event_handler() self.state = hs.get_state_handler() self._event_auth_handler = hs.get_event_auth_handler() self._room_member_handler = hs.get_room_member_handler() + self._e2e_keys_handler = hs.get_e2e_keys_handler() self._state_storage_controller = hs.get_storage_controllers().state @@ -1012,15 +1013,14 @@ async def on_claim_client_keys( query.append((user_id, device_id, algorithm)) log_kv({"message": "Claiming one time keys.", "user, device pairs": query}) - results = await self.store.claim_e2e_one_time_keys(query) + results = await self._e2e_keys_handler.claim_local_one_time_keys(query) - json_result: Dict[str, Dict[str, dict]] = {} - for user_id, device_keys in results.items(): - for device_id, keys in device_keys.items(): - for key_id, json_str in keys.items(): - json_result.setdefault(user_id, {})[device_id] = { - key_id: json_decoder.decode(json_str) - } + json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + for result in results: + for user_id, device_keys in result.items(): + for device_id, keys in device_keys.items(): + for key_id, key in keys.items(): + json_result.setdefault(user_id, {})[device_id] = {key_id: key} logger.info( "Claimed one-time-keys: %s", @@ -1129,7 +1129,7 @@ async def _handle_received_pdu(self, origin: str, pdu: EventBase) -> None: logger.warning("event id %s: %s", pdu.event_id, e) raise FederationError("ERROR", 403, str(e), affected=pdu.event_id) - if await self._spam_checker.should_drop_federated_event(pdu): + if await self._spam_checker_module_callbacks.should_drop_federated_event(pdu): logger.warning( "Unstaged federated event contains spam, dropping %s", pdu.event_id ) @@ -1174,7 +1174,9 @@ async def _get_next_nonspam_staged_event_for_room( origin, event = next - if await self._spam_checker.should_drop_federated_event(event): + if await self._spam_checker_module_callbacks.should_drop_federated_event( + event + ): logger.warning( "Staged federated event contains spam, dropping %s", event.event_id, diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index 3063df799099..0b7c81677ebd 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -244,7 +244,7 @@ def send_presence_to_destinations( self.notifier.on_new_replication_data() - def send_device_messages(self, destination: str, immediate: bool = False) -> None: + def send_device_messages(self, destination: str, 
immediate: bool = True) -> None:
         """As per FederationSender"""
         # We don't need to replicate this as it gets sent down a different
         # stream.
diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py
index 902214f6513e..b09438d5cdbc 100644
--- a/synapse/federation/sender/__init__.py
+++ b/synapse/federation/sender/__init__.py
@@ -11,6 +11,119 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""
+The Federation Sender is responsible for sending Persistent Data Units (PDUs)
+and Ephemeral Data Units (EDUs) to other homeservers using
+the `/send` Federation API.
+
+
+## How do PDUs get sent?
+
+The Federation Sender is made aware of new PDUs due to `FederationSender.notify_new_events`.
+When the sender is notified about a newly-persisted PDU that originates from this homeserver
+and is not an out-of-band event, we pass the PDU to the `_PerDestinationQueue` for each
+remote homeserver that is in the room at that point in the DAG.
+
+
+### Per-Destination Queues
+
+There is one `PerDestinationQueue` per 'destination' homeserver.
+The `PerDestinationQueue` maintains the following information about the destination:
+
+- whether the destination is currently in [catch-up mode (see below)](#catch-up-mode);
+- a queue of PDUs to be sent to the destination; and
+- a queue of EDUs to be sent to the destination (not considered in this section).
+
+Upon a new PDU being enqueued, `attempt_new_transaction` is called to start a new
+transaction if there is not already one in progress.
+
+
+### Transactions and the Transaction Transmission Loop
+
+Each federation HTTP request to the `/send` endpoint is referred to as a 'transaction'.
+The body of the HTTP request contains a list of PDUs and EDUs to send to the destination.
+
+The *Transaction Transmission Loop* (`_transaction_transmission_loop`) is responsible
+for emptying the queued PDUs (and EDUs) from a `PerDestinationQueue` by sending
+them to the destination.
+
+There can only be one transaction in flight for a given destination at any time.
+(Other than preventing us from overloading the destination, this also makes it easier to
+reason about because we process events sequentially for each destination.
+This is useful for *Catch-Up Mode*, described later.)
+
+The loop continues so long as there is anything to send. At each iteration of the loop, we:
+
+- dequeue up to 50 PDUs (and up to 100 EDUs).
+- make the `/send` request to the destination homeserver with the dequeued PDUs and EDUs.
+- if successful, make note of the fact that we succeeded in transmitting PDUs up to
+  the given `stream_ordering` of the latest PDU by advancing the destination's
+  `last_successful_stream_ordering` in the `destinations` table.
+- if unsuccessful, back off from the remote homeserver for some time.
+  If we have been unsuccessful for too long (when the backoff interval grows to exceed 1 hour),
+  the in-memory queues are emptied and we enter [*Catch-Up Mode*, described below](#catch-up-mode).
+
+
+### Catch-Up Mode
+
+When the `PerDestinationQueue` has the catch-up flag set, the *Catch-Up Transmission Loop*
+(`_catch_up_transmission_loop`) is used in lieu of the regular `_transaction_transmission_loop`.
+(Only once catch-up mode has been exited can the regular transaction transmission behaviour
+be resumed.)
+
+*Catch-Up Mode*, entered upon Synapse startup or once a homeserver has fallen behind due to
+connection problems, is responsible for sending PDUs that have been missed by the destination
+homeserver. (PDUs can be missed because the `PerDestinationQueue` is volatile — i.e. resets
+on startup — and it does not hold PDUs forever if `/send` requests to the destination fail.)
+
+The catch-up mechanism makes use of the `last_successful_stream_ordering` column in the
+`destinations` table (which gives the `stream_ordering` of the most recent successfully
+sent PDU) and the `stream_ordering` column in the `destination_rooms` table (which gives,
+for each room, the `stream_ordering` of the most recent PDU that needs to be sent to this
+destination).
+
+Each iteration of the loop pulls out 50 `destination_rooms` entries with the oldest
+`stream_ordering`s that are greater than the `last_successful_stream_ordering`.
+In other words, from the set of latest PDUs in each room to be sent to the destination,
+the 50 oldest such PDUs are pulled out.
+
+These PDUs could, in principle, now be directly sent to the destination. However, as an
+optimisation intended to prevent overloading destination homeservers, we instead attempt
+to send the latest forward extremities so long as the destination homeserver is still
+eligible to receive those.
+This reduces load on the destination **in aggregate** because all Synapse homeservers
+will behave according to this principle and therefore avoid sending lots of different PDUs
+at different points in the DAG to a recovering homeserver.
+*This optimisation is not currently valid in rooms which are partial-state on this homeserver,
+since we are unable to determine whether the destination homeserver is eligible to receive
+the latest forward extremities unless this homeserver sent those PDUs — in this case, we
+just send the latest PDUs originating from this server and skip this optimisation.*
+
+Whilst PDUs are sent through this mechanism, the position of `last_successful_stream_ordering`
+is advanced as normal.
+Once there are no longer any rooms containing outstanding PDUs to be sent to the destination
+*that are not already in the `PerDestinationQueue` because they arrived since Catch-Up Mode
+was enabled*, Catch-Up Mode is exited and we return to `_transaction_transmission_loop`.
+
+
+#### A note on failures and back-offs
+
+If a remote server is unreachable over federation, we back off from that server,
+with an exponentially-increasing retry interval.
+Whilst we don't automatically retry after the interval, we prevent making new attempts
+until such time as the back-off has cleared.
+Once the back-off is cleared and a new PDU or EDU arrives for transmission, the transmission
+loop resumes and empties the queue by making federation requests.
+
+If the backoff grows too large (> 1 hour), the in-memory queue is emptied (to prevent
+unbounded growth) and Catch-Up Mode is entered.
+
+It is worth noting that the back-off for a remote server is cleared once an inbound
+request from that remote server is received (see `notify_remote_server_up`).
+At this point, the transaction transmission loop is also started up, to proactively
+send missed PDUs and EDUs to the destination (i.e. you don't need to wait for a new PDU
+or EDU, destined for that destination, to be created in order to send out missed PDUs and
+EDUs).
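To make the loop and the catch-up hand-off described above easier to follow, here is a deliberately simplified, runnable sketch. Every name in it (`Queue`, `send_transaction`, the field names) is an illustrative stand-in, not Synapse's actual implementation; the real sender also persists `last_successful_stream_ordering` and replays from the database rather than requeueing in memory.

```python
import asyncio
import random
from dataclasses import dataclass, field
from typing import List

MAX_PDUS, MAX_EDUS = 50, 100
CATCH_UP_THRESHOLD_S = 60 * 60  # back-offs beyond 1 hour trigger catch-up


@dataclass
class Queue:
    """Illustrative stand-in for a per-destination queue."""

    destination: str
    pdus: List[str] = field(default_factory=list)
    edus: List[str] = field(default_factory=list)
    catching_up: bool = False


async def send_transaction(destination: str, pdus: List[str], edus: List[str]) -> None:
    # Stand-in for the `/send` HTTP request; fails randomly for illustration.
    if random.random() < 0.3:
        raise ConnectionError(f"{destination} unreachable")


async def transmission_loop(q: Queue) -> None:
    backoff_s = 1.0
    while q.pdus or q.edus:
        # Dequeue up to 50 PDUs and up to 100 EDUs for this transaction.
        pdus, q.pdus = q.pdus[:MAX_PDUS], q.pdus[MAX_PDUS:]
        edus, q.edus = q.edus[:MAX_EDUS], q.edus[MAX_EDUS:]
        try:
            await send_transaction(q.destination, pdus, edus)
        except ConnectionError:
            # Requeue what we dequeued and back off exponentially.
            q.pdus[:0] = pdus
            q.edus[:0] = edus
            backoff_s *= 2
            if backoff_s > CATCH_UP_THRESHOLD_S:
                # Too far behind: empty the in-memory queues and rely on
                # catch-up mode to replay from last_successful_stream_ordering.
                q.pdus.clear()
                q.edus.clear()
                q.catching_up = True
                return
            await asyncio.sleep(backoff_s)
        else:
            # Success: a real sender would advance the destination's
            # last_successful_stream_ordering here.
            backoff_s = 1.0


asyncio.run(transmission_loop(Queue("remote.example", pdus=["$pdu1", "$pdu2"])))
```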
+""" import abc import logging @@ -790,7 +903,7 @@ def send_edu(self, edu: Edu, key: Optional[Hashable]) -> None: else: queue.send_edu(edu) - def send_device_messages(self, destination: str, immediate: bool = False) -> None: + def send_device_messages(self, destination: str, immediate: bool = True) -> None: if destination == self.server_name: logger.warning("Not sending device update to ourselves") return diff --git a/synapse/federation/transport/server/__init__.py b/synapse/federation/transport/server/__init__.py index 2725f53cf6d9..753372fc5476 100644 --- a/synapse/federation/transport/server/__init__.py +++ b/synapse/federation/transport/server/__init__.py @@ -108,6 +108,7 @@ class PublicRoomList(BaseFederationServlet): """ PATH = "/publicRooms" + CATEGORY = "Federation requests" def __init__( self, @@ -212,6 +213,7 @@ class OpenIdUserInfo(BaseFederationServlet): """ PATH = "/openid/userinfo" + CATEGORY = "Federation requests" REQUIRE_AUTH = False diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index f7ca87adc491..ec5b5eeafa29 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -70,6 +70,7 @@ def __init__( class FederationSendServlet(BaseFederationServerServlet): PATH = "/send/(?P[^/]*)/?" + CATEGORY = "Inbound federation transaction request" # We ratelimit manually in the handler as we queue up the requests and we # don't want to fill up the ratelimiter with blocked requests. @@ -138,6 +139,7 @@ async def on_PUT( class FederationEventServlet(BaseFederationServerServlet): PATH = "/event/(?P[^/]*)/?" + CATEGORY = "Federation requests" # This is when someone asks for a data item for a given server data_id pair. async def on_GET( @@ -152,6 +154,7 @@ async def on_GET( class FederationStateV1Servlet(BaseFederationServerServlet): PATH = "/state/(?P[^/]*)/?" + CATEGORY = "Federation requests" # This is when someone asks for all data for a given room. async def on_GET( @@ -170,6 +173,7 @@ async def on_GET( class FederationStateIdsServlet(BaseFederationServerServlet): PATH = "/state_ids/(?P[^/]*)/?" + CATEGORY = "Federation requests" async def on_GET( self, @@ -187,6 +191,7 @@ async def on_GET( class FederationBackfillServlet(BaseFederationServerServlet): PATH = "/backfill/(?P[^/]*)/?" + CATEGORY = "Federation requests" async def on_GET( self, @@ -225,6 +230,7 @@ class FederationTimestampLookupServlet(BaseFederationServerServlet): """ PATH = "/timestamp_to_event/(?P[^/]*)/?" 
+ CATEGORY = "Federation requests" async def on_GET( self, @@ -246,6 +252,7 @@ async def on_GET( class FederationQueryServlet(BaseFederationServerServlet): PATH = "/query/(?P[^/]*)" + CATEGORY = "Federation requests" # This is when we receive a server-server Query async def on_GET( @@ -262,6 +269,7 @@ async def on_GET( class FederationMakeJoinServlet(BaseFederationServerServlet): PATH = "/make_join/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_GET( self, @@ -297,6 +305,7 @@ async def on_GET( class FederationMakeLeaveServlet(BaseFederationServerServlet): PATH = "/make_leave/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_GET( self, @@ -312,6 +321,7 @@ async def on_GET( class FederationV1SendLeaveServlet(BaseFederationServerServlet): PATH = "/send_leave/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_PUT( self, @@ -327,6 +337,7 @@ async def on_PUT( class FederationV2SendLeaveServlet(BaseFederationServerServlet): PATH = "/send_leave/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" PREFIX = FEDERATION_V2_PREFIX @@ -344,6 +355,7 @@ async def on_PUT( class FederationMakeKnockServlet(BaseFederationServerServlet): PATH = "/make_knock/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_GET( self, @@ -366,6 +378,7 @@ async def on_GET( class FederationV1SendKnockServlet(BaseFederationServerServlet): PATH = "/send_knock/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_PUT( self, @@ -381,6 +394,7 @@ async def on_PUT( class FederationEventAuthServlet(BaseFederationServerServlet): PATH = "/event_auth/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_GET( self, @@ -395,6 +409,7 @@ async def on_GET( class FederationV1SendJoinServlet(BaseFederationServerServlet): PATH = "/send_join/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_PUT( self, @@ -412,6 +427,7 @@ async def on_PUT( class FederationV2SendJoinServlet(BaseFederationServerServlet): PATH = "/send_join/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" PREFIX = FEDERATION_V2_PREFIX @@ -455,6 +471,7 @@ async def on_PUT( class FederationV1InviteServlet(BaseFederationServerServlet): PATH = "/invite/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_PUT( self, @@ -479,6 +496,7 @@ async def on_PUT( class FederationV2InviteServlet(BaseFederationServerServlet): PATH = "/invite/(?P[^/]*)/(?P[^/]*)" + CATEGORY = "Federation requests" PREFIX = FEDERATION_V2_PREFIX @@ -515,6 +533,7 @@ async def on_PUT( class FederationThirdPartyInviteExchangeServlet(BaseFederationServerServlet): PATH = "/exchange_third_party_invite/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_PUT( self, @@ -529,6 +548,7 @@ async def on_PUT( class FederationClientKeysQueryServlet(BaseFederationServerServlet): PATH = "/user/keys/query" + CATEGORY = "Federation requests" async def on_POST( self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] @@ -538,6 +558,7 @@ async def on_POST( class FederationUserDevicesQueryServlet(BaseFederationServerServlet): PATH = "/user/devices/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_GET( self, @@ -551,6 +572,7 @@ async def on_GET( class FederationClientKeysClaimServlet(BaseFederationServerServlet): PATH = "/user/keys/claim" + CATEGORY = "Federation requests" async def on_POST( self, origin: str, content: JsonDict, query: Dict[bytes, List[bytes]] @@ -561,6 +583,7 @@ async def on_POST( class FederationGetMissingEventsServlet(BaseFederationServerServlet): 
PATH = "/get_missing_events/(?P[^/]*)" + CATEGORY = "Federation requests" async def on_POST( self, @@ -586,6 +609,7 @@ async def on_POST( class On3pidBindServlet(BaseFederationServerServlet): PATH = "/3pid/onbind" + CATEGORY = "Federation requests" REQUIRE_AUTH = False @@ -618,6 +642,7 @@ async def on_POST( class FederationVersionServlet(BaseFederationServlet): PATH = "/version" + CATEGORY = "Federation requests" REQUIRE_AUTH = False @@ -640,6 +665,7 @@ async def on_GET( class FederationRoomHierarchyServlet(BaseFederationServlet): PATH = "/hierarchy/(?P[^/]*)" + CATEGORY = "Federation requests" def __init__( self, @@ -672,6 +698,7 @@ class RoomComplexityServlet(BaseFederationServlet): PATH = "/rooms/(?P[^/]*)/complexity" PREFIX = FEDERATION_UNSTABLE_PREFIX + CATEGORY = "Federation requests (unstable)" def __init__( self, diff --git a/synapse/handlers/appservice.py b/synapse/handlers/appservice.py index ec3ab968e925..da887647d4d8 100644 --- a/synapse/handlers/appservice.py +++ b/synapse/handlers/appservice.py @@ -12,7 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. import logging -from typing import TYPE_CHECKING, Collection, Dict, Iterable, List, Optional, Union +from typing import ( + TYPE_CHECKING, + Collection, + Dict, + Iterable, + List, + Mapping, + Optional, + Tuple, + Union, +) from prometheus_client import Counter @@ -829,3 +839,126 @@ async def _check_user_exists(self, user_id: str) -> bool: if unknown_user: return await self.query_user_exists(user_id) return True + + async def claim_e2e_one_time_keys( + self, query: Iterable[Tuple[str, str, str]] + ) -> Tuple[ + Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]], List[Tuple[str, str, str]] + ]: + """Claim one time keys from application services. + + Users which are exclusively owned by an application service are sent a + key claim request to check if the application service provides keys + directly. + + Args: + query: An iterable of tuples of (user ID, device ID, algorithm). + + Returns: + A tuple of: + An iterable of maps of user ID -> a map device ID -> a map of key ID -> JSON bytes. + + A copy of the input which has not been fulfilled (either because + they are not appservice users or the appservice does not support + providing OTKs). + """ + services = self.store.get_app_services() + + # Partition the users by appservice. + query_by_appservice: Dict[str, List[Tuple[str, str, str]]] = {} + missing = [] + for user_id, device, algorithm in query: + if not self.store.get_if_app_services_interested_in_user(user_id): + missing.append((user_id, device, algorithm)) + continue + + # Find the associated appservice. + for service in services: + if service.is_exclusive_user(user_id): + query_by_appservice.setdefault(service.id, []).append( + (user_id, device, algorithm) + ) + continue + + # Query each service in parallel. + results = await make_deferred_yieldable( + defer.DeferredList( + [ + run_in_background( + self.appservice_api.claim_client_keys, + # We know this must be an app service. + self.store.get_app_service_by_id(service_id), # type: ignore[arg-type] + service_query, + ) + for service_id, service_query in query_by_appservice.items() + ], + consumeErrors=True, + ) + ) + + # Patch together the results -- they are all independent (since they + # require exclusive control over the users). They get returned as a list + # and the caller combines them. 
+ claimed_keys: List[Dict[str, Dict[str, Dict[str, JsonDict]]]] = [] + for success, result in results: + if success: + claimed_keys.append(result[0]) + missing.extend(result[1]) + + return claimed_keys, missing + + async def query_keys( + self, query: Mapping[str, Optional[List[str]]] + ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]: + """Query application services for device keys. + + Users which are exclusively owned by an application service are queried + for keys to check if the application service provides keys directly. + + Args: + query: map from user_id to a list of devices to query + + Returns: + A map from user_id -> device_id -> device details + """ + services = self.store.get_app_services() + + # Partition the users by appservice. + query_by_appservice: Dict[str, Dict[str, List[str]]] = {} + for user_id, device_ids in query.items(): + if not self.store.get_if_app_services_interested_in_user(user_id): + continue + + # Find the associated appservice. + for service in services: + if service.is_exclusive_user(user_id): + query_by_appservice.setdefault(service.id, {})[user_id] = ( + device_ids or [] + ) + continue + + # Query each service in parallel. + results = await make_deferred_yieldable( + defer.DeferredList( + [ + run_in_background( + self.appservice_api.query_keys, + # We know this must be an app service. + self.store.get_app_service_by_id(service_id), # type: ignore[arg-type] + service_query, + ) + for service_id, service_query in query_by_appservice.items() + ], + consumeErrors=True, + ) + ) + + # Patch together the results -- they are all independent (since they + # require exclusive control over the users). They get returned as a single + # dictionary. + key_queries: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + for success, result in results: + if success: + key_queries.update(result) + + return key_queries diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index c11c717c2633..8e66e9914a44 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -1508,8 +1508,10 @@ async def delete_access_token(self, access_token: str) -> None: ) # delete pushers associated with this access token + # XXX(quenting): This is only needed until the 'set_device_id_for_pushers' + # background update completes. if token.token_id is not None: - await self.hs.get_pusherpool().remove_pushers_by_access_token( + await self.hs.get_pusherpool().remove_pushers_by_access_tokens( token.user_id, (token.token_id,) ) @@ -1539,7 +1541,9 @@ async def delete_access_tokens_for_user( ) # delete pushers associated with the access tokens - await self.hs.get_pusherpool().remove_pushers_by_access_token( + # XXX(quenting): This is only needed until the 'set_device_id_for_pushers' + # background update completes. + await self.hs.get_pusherpool().remove_pushers_by_access_tokens( user_id, (token_id for _, token_id, _ in tokens_and_devices) ) diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index d31263c717b5..bd5867491baf 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -176,6 +176,9 @@ async def deactivate_account( # Remove account data (including ignored users and push rules). await self.store.purge_account_data_for_user(user_id) + # Delete any server-side backup keys + await self.store.bulk_delete_backup_keys_and_versions_for_user(user_id) + # Let modules know the user has been deactivated. 
await self._third_party_rules.on_user_deactivation_status_changed( user_id, diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 6f7963df43ae..ae1d9337adc5 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -215,6 +215,16 @@ async def get_user_ids_changed( possibly_changed = set(changed) possibly_left = set() for room_id in rooms_changed: + # Check if the forward extremities have changed. If not then we know + # the current state won't have changed, and so we can skip this room. + try: + if not await self.store.have_room_forward_extremities_changed_since( + room_id, stream_ordering + ): + continue + except errors.StoreError: + pass + current_state_ids = await self._state_storage.get_current_state_ids( room_id, await_full_state=False ) @@ -521,6 +531,10 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: f"org.matrix.msc3890.local_notification_settings.{device_id}", ) + # Pushers are deleted after `delete_access_tokens_for_user` is called so that + # modules using `on_logged_out` hook can use them if needed. + await self.hs.get_pusherpool().remove_pushers_by_devices(user_id, device_ids) + await self.notify_device_update(user_id, device_ids) async def update_device(self, user_id: str, device_id: str, content: dict) -> None: diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 1fb23cc9bf51..5e8316e2e511 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -60,7 +60,7 @@ def __init__(self, hs: "HomeServer"): "directory", self.on_directory_query ) - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker async def _create_association( self, @@ -145,10 +145,12 @@ async def create_association( 403, "You must be in the room to create an alias for it" ) - spam_check = await self.spam_checker.user_may_create_room_alias( - user_id, room_alias + spam_check = ( + await self._spam_checker_module_callbacks.user_may_create_room_alias( + user_id, room_alias + ) ) - if spam_check != self.spam_checker.NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: raise AuthError( 403, "This user is not permitted to create this alias", @@ -444,7 +446,9 @@ async def edit_published_room_list( """ user_id = requester.user.to_string() - spam_check = await self.spam_checker.user_may_publish_room(user_id, room_id) + spam_check = await self._spam_checker_module_callbacks.user_may_publish_room( + user_id, room_id + ) if spam_check != NOT_SPAM: raise AuthError( 403, diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 4e9c8d8db0c7..007366747014 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -13,7 +13,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
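The `get_user_ids_changed` hunk above adds a cheap short-circuit: if a room's forward extremities have not changed since the client's `stream_ordering`, the room's current state cannot have changed either, so the expensive current-state lookup can be skipped. A minimal sketch of that pattern (the store here is a hypothetical stub; Synapse's real method of the same name is backed by the database and the caller also tolerates `StoreError`):

```python
import asyncio
from typing import Set


class StubStore:
    """Hypothetical stand-in for the datastore used above."""

    def __init__(self, rooms_with_new_extremities: Set[str]) -> None:
        self._changed = rooms_with_new_extremities

    async def have_room_forward_extremities_changed_since(
        self, room_id: str, stream_ordering: int
    ) -> bool:
        return room_id in self._changed


async def rooms_needing_state_check(
    store: StubStore, rooms_changed: Set[str], from_token: int
) -> Set[str]:
    needs_check = set()
    for room_id in rooms_changed:
        # No new forward extremities persisted since `from_token` means the
        # room's current state cannot have changed: skip it entirely.
        if await store.have_room_forward_extremities_changed_since(
            room_id, from_token
        ):
            needs_check.add(room_id)
    return needs_check


store = StubStore({"!busy:example.org"})
print(
    asyncio.run(
        rooms_needing_state_check(
            store, {"!busy:example.org", "!idle:example.org"}, 42
        )
    )
)
```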
-
 import logging
 from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Mapping, Optional, Tuple
@@ -53,6 +52,7 @@ def __init__(self, hs: "HomeServer"):
         self.store = hs.get_datastores().main
         self.federation = hs.get_federation_client()
         self.device_handler = hs.get_device_handler()
+        self._appservice_handler = hs.get_application_service_handler()
         self.is_mine = hs.is_mine
         self.clock = hs.get_clock()
@@ -88,6 +88,13 @@ def __init__(self, hs: "HomeServer"):
             max_count=10,
         )
+        self._query_appservices_for_otks = (
+            hs.config.experimental.msc3983_appservice_otk_claims
+        )
+        self._query_appservices_for_keys = (
+            hs.config.experimental.msc3984_appservice_key_query
+        )
+
     @trace
     @cancellable
     async def query_devices(
@@ -493,6 +500,19 @@ async def query_local_devices(
             local_query, include_displaynames
         )
+        # Check if the application services have any additional results.
+        if self._query_appservices_for_keys:
+            # Query the appservices for any keys.
+            appservice_results = await self._appservice_handler.query_keys(query)
+
+            # Merge results, overriding with what the appservice returned.
+            for user_id, devices in appservice_results.get("device_keys", {}).items():
+                # Copy the appservice device info over the homeserver device info, but
+                # don't completely overwrite it.
+                results.setdefault(user_id, {}).update(devices)
+
+            # TODO Handle cross-signing keys.
+
         # Build the result structure
         for user_id, device_keys in results.items():
             for device_id, device_info in device_keys.items():
@@ -542,6 +562,42 @@ async def on_federation_query_client_keys(
         return ret

+    async def claim_local_one_time_keys(
+        self, local_query: List[Tuple[str, str, str]]
+    ) -> Iterable[Dict[str, Dict[str, Dict[str, JsonDict]]]]:
+        """Claim one time keys for local users.
+
+        1. Attempt to claim OTKs from the database.
+        2. Ask application services if they provide OTKs.
+        3. Attempt to fetch fallback keys from the database.
+
+        Args:
+            local_query: An iterable of tuples of (user ID, device ID, algorithm).
+
+        Returns:
+            An iterable of maps of user ID -> a map of device ID -> a map of key ID -> key JSON dict.
+        """
+
+        otk_results, not_found = await self.store.claim_e2e_one_time_keys(local_query)
+
+        # If the application services have not provided any keys via the C-S
+        # API, query them directly for one-time keys.
+        if self._query_appservices_for_otks:
+            (
+                appservice_results,
+                not_found,
+            ) = await self._appservice_handler.claim_e2e_one_time_keys(not_found)
+        else:
+            appservice_results = []
+
+        # For each user that does not have a one-time key available, see if
+        # there is a fallback key.
+        fallback_results = await self.store.claim_e2e_fallback_keys(not_found)
+
+        # Return the results in order; each item from the input query should
+        # only appear once in the combined list.
+        return (otk_results, *appservice_results, fallback_results)
+
     @trace
     async def claim_one_time_keys(
         self, query: Dict[str, Dict[str, Dict[str, str]]], timeout: Optional[int]
@@ -561,17 +617,18 @@ async def claim_one_time_keys(
         set_tag("local_key_query", str(local_query))
         set_tag("remote_key_query", str(remote_queries))

-        results = await self.store.claim_e2e_one_time_keys(local_query)
+        results = await self.claim_local_one_time_keys(local_query)

         # A map of user ID -> device ID -> key ID -> key.
json_result: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + for result in results: + for user_id, device_keys in result.items(): + for device_id, keys in device_keys.items(): + for key_id, key in keys.items(): + json_result.setdefault(user_id, {})[device_id] = {key_id: key} + + # Remote failures. failures: Dict[str, JsonDict] = {} - for user_id, device_keys in results.items(): - for device_id, keys in device_keys.items(): - for key_id, json_str in keys.items(): - json_result.setdefault(user_id, {})[device_id] = { - key_id: json_decoder.decode(json_str) - } @trace async def claim_client_keys(destination: str) -> None: diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 80156ef343aa..d1a88cc60460 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -141,7 +141,7 @@ def __init__(self, hs: "HomeServer"): self.server_name = hs.hostname self.keyring = hs.get_keyring() self.is_mine_id = hs.is_mine_id - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.event_creation_handler = hs.get_event_creation_handler() self.event_builder_factory = hs.get_event_builder_factory() self._event_auth_handler = hs.get_event_auth_handler() @@ -1042,7 +1042,7 @@ async def on_invite_request( if self.hs.config.server.block_non_admin_invites: raise SynapseError(403, "This server does not accept room invites") - spam_check = await self.spam_checker.user_may_invite( + spam_check = await self._spam_checker_module_callbacks.user_may_invite( event.sender, event.state_key, event.room_id ) if spam_check != NOT_SPAM: @@ -1949,27 +1949,25 @@ async def _sync_partial_state_room( ) for event in events: for attempt in itertools.count(): + # We try a new destination on every iteration. try: - await self._federation_event_handler.update_state_for_partial_state_event( - destination, event - ) + while True: + try: + await self._federation_event_handler.update_state_for_partial_state_event( + destination, event + ) + break + except FederationPullAttemptBackoffError as e: + # We are in the backoff period for one of the event's + # prev_events. Wait it out and try again after. + logger.warning( + "%s; waiting for %d ms...", e, e.retry_after_ms + ) + await self.clock.sleep(e.retry_after_ms / 1000) + + # Success, no need to try the rest of the destinations. break - except FederationPullAttemptBackoffError as exc: - # Log a warning about why we failed to process the event (the error message - # for `FederationPullAttemptBackoffError` is pretty good) - logger.warning("_sync_partial_state_room: %s", exc) - # We do not record a failed pull attempt when we backoff fetching a missing - # `prev_event` because not being able to fetch the `prev_events` just means - # we won't be able to de-outlier the pulled event. But we can still use an - # `outlier` in the state/auth chain for another event. So we shouldn't stop - # a downstream event from trying to pull it. - # - # This avoids a cascade of backoff for all events in the DAG downstream from - # one event backoff upstream. except FederationError as e: - # TODO: We should `record_event_failed_pull_attempt` here, - # see https://github.com/matrix-org/synapse/issues/13700 - if attempt == len(destinations) - 1: # We have tried every remote server for this event. Give up. 
                    # TODO(faster_joins) giving up isn't the right thing to do
@@ -1986,6 +1984,8 @@ async def _sync_partial_state_room(
                        destination,
                        e,
                    )
+                    # TODO: We should `record_event_failed_pull_attempt` here,
+                    #   see https://github.com/matrix-org/synapse/issues/13700
                    raise

                # Try the next remote server.
diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py
index b7136f8d1ccb..8d5be81a9207 100644
--- a/synapse/handlers/federation_event.py
+++ b/synapse/handlers/federation_event.py
@@ -140,6 +140,7 @@ class FederationEventHandler:
     """

     def __init__(self, hs: "HomeServer"):
+        self._clock = hs.get_clock()
         self._store = hs.get_datastores().main
         self._storage_controllers = hs.get_storage_controllers()
         self._state_storage_controller = self._storage_controllers.state
@@ -583,7 +584,7 @@ async def process_remote_join(
         await self._check_event_auth(origin, event, context)
         if context.rejected:
-            raise SynapseError(400, "Join event was rejected")
+            raise SynapseError(403, "Join event was rejected")

         # the remote server is responsible for sending our join event to the rest
         # of the federation. Indeed, attempting to do so will result in problems
@@ -1038,8 +1039,8 @@ async def _compute_event_context_with_maybe_missing_prevs(
         Raises:
             FederationPullAttemptBackoffError if we are deliberately not attempting
-                to pull the given event over federation because we've already done so
-                recently and are backing off.
+                to pull one of the given event's `prev_event`s over federation because
+                we've already done so recently and are backing off.
             FederationError if we fail to get the state from the remote server after any
                 missing `prev_event`s.
         """
@@ -1053,13 +1054,22 @@
         # If we've already recently attempted to pull this missing event, don't
         # try it again so soon. Since we have to fetch all of the prev_events, we can
         # bail early here if we find any to ignore.
-        prevs_to_ignore = await self._store.get_event_ids_to_not_pull_from_backoff(
-            room_id, missing_prevs
+        prevs_with_pull_backoff = (
+            await self._store.get_event_ids_to_not_pull_from_backoff(
+                room_id, missing_prevs
+            )
         )
-        if len(prevs_to_ignore) > 0:
+        if len(prevs_with_pull_backoff) > 0:
             raise FederationPullAttemptBackoffError(
-                event_ids=prevs_to_ignore,
-                message=f"While computing context for event={event_id}, not attempting to pull missing prev_event={prevs_to_ignore[0]} because we already tried to pull recently (backing off).",
+                event_ids=prevs_with_pull_backoff.keys(),
+                message=(
+                    f"While computing context for event={event_id}, not attempting to "
+                    f"pull missing prev_events={list(prevs_with_pull_backoff.keys())} "
+                    "because we already tried to pull recently (backing off)."
+                ),
+                retry_after_ms=(
+                    max(prevs_with_pull_backoff.values()) - self._clock.time_msec()
+                ),
             )

         if not missing_prevs:
@@ -1505,7 +1515,10 @@ async def _handle_marker_event(self, origin: str, marker_event: EventBase) -> No
         # support it or the event is not from the room creator.
room_version = await self._store.get_room_version(marker_event.room_id) create_event = await self._store.get_create_event_for_room(marker_event.room_id) - room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + if not room_version.msc2175_implicit_room_creator: + room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + else: + room_creator = create_event.sender if not room_version.msc2716_historical and ( not self._config.experimental.msc2716_enabled or marker_event.sender != room_creator diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index ecd05c564212..521ad9df5def 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -508,7 +508,7 @@ def __init__(self, hs: "HomeServer"): self._bulk_push_rule_evaluator = hs.get_bulk_push_rule_evaluator() - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.third_party_event_rules: "ThirdPartyEventRules" = ( self.hs.get_third_party_event_rules() ) @@ -1035,8 +1035,12 @@ async def create_and_send_nonmember_event( event.sender, ) - spam_check_result = await self.spam_checker.check_event_for_spam(event) - if spam_check_result != self.spam_checker.NOT_SPAM: + spam_check_result = ( + await self._spam_checker_module_callbacks.check_event_for_spam( + event + ) + ) + if spam_check_result != self._spam_checker_module_callbacks.NOT_SPAM: if isinstance(spam_check_result, tuple): try: [code, dict] = spam_check_result @@ -1922,7 +1926,12 @@ async def persist_and_notify_client_events( room_version_obj = KNOWN_ROOM_VERSIONS[room_version] create_event = await self.store.get_create_event_for_room(event.room_id) - room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR) + if not room_version_obj.msc2175_implicit_room_creator: + room_creator = create_event.content.get( + EventContentFields.ROOM_CREATOR + ) + else: + room_creator = create_event.sender # Only check an insertion event if the room version # supports it or the event is from the room creator. diff --git a/synapse/handlers/oidc.py b/synapse/handlers/oidc.py index 0fc829acf77d..e7e0b5e049b3 100644 --- a/synapse/handlers/oidc.py +++ b/synapse/handlers/oidc.py @@ -1239,6 +1239,7 @@ async def grandfather_existing_users() -> Optional[str]: grandfather_existing_users, extra_attributes, auth_provider_session_id=sid, + registration_enabled=self._config.enable_registration, ) def _remote_id_from_userinfo(self, userinfo: UserInfo) -> str: diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 6b110dcb6e4d..61c4b833bd30 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -110,7 +110,7 @@ def __init__(self, hs: "HomeServer"): self._server_notices_mxid = hs.config.servernotices.server_notices_mxid self._server_name = hs.hostname - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker if hs.config.worker.worker_app: self._register_client = ReplicationRegisterServlet.make_client(hs) @@ -259,7 +259,7 @@ async def register_user( await self.check_registration_ratelimit(address) - result = await self.spam_checker.check_registration_for_spam( + result = await self._spam_checker_module_callbacks.check_registration_for_spam( threepid, localpart, user_agent_ips or [], @@ -1013,11 +1013,11 @@ async def _register_email_threepid( user_tuple = await self.store.get_user_by_access_token(token) # The token better still exist. 
assert user_tuple - token_id = user_tuple.token_id + device_id = user_tuple.device_id await self.pusher_pool.add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="email", app_id="m.email", app_display_name="Email Notifications", diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e326515c2441..766562a987ec 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -118,7 +118,7 @@ def __init__(self, hs: "HomeServer"): self.auth_blocking = hs.get_auth_blocking() self.clock = hs.get_clock() self.hs = hs - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.event_creation_handler = hs.get_event_creation_handler() self.room_member_handler = hs.get_room_member_handler() self._event_auth_handler = hs.get_event_auth_handler() @@ -465,7 +465,9 @@ async def clone_existing_room( """ user_id = requester.user.to_string() - spam_check = await self.spam_checker.user_may_create_room(user_id) + spam_check = await self._spam_checker_module_callbacks.user_may_create_room( + user_id + ) if spam_check != NOT_SPAM: raise SynapseError( 403, @@ -583,6 +585,7 @@ async def clone_existing_room( await self._send_events_for_new_room( requester, new_room_id, + new_room_version, # we expect to override all the presets with initial_state, so this is # somewhat arbitrary. room_config={"preset": RoomCreationPreset.PRIVATE_CHAT}, @@ -776,7 +779,9 @@ async def create_room( ) if not is_requester_admin: - spam_check = await self.spam_checker.user_may_create_room(user_id) + spam_check = await self._spam_checker_module_callbacks.user_may_create_room( + user_id + ) if spam_check != NOT_SPAM: raise SynapseError( 403, @@ -938,6 +943,7 @@ async def create_room( ) = await self._send_events_for_new_room( requester, room_id, + room_version, room_config=config, invite_list=invite_list, initial_state=initial_state, @@ -1017,6 +1023,7 @@ async def _send_events_for_new_room( self, creator: Requester, room_id: str, + room_version: RoomVersion, room_config: JsonDict, invite_list: List[str], initial_state: MutableStateMap, @@ -1039,6 +1046,8 @@ async def _send_events_for_new_room( the user requesting the room creation room_id: room id for the room being created + room_version: + The room version of the new room. room_config: A dict of configuration options. This will be the body of a /createRoom request; see @@ -1072,14 +1081,6 @@ async def _send_events_for_new_room( # (as this info can't be pulled from the db) state_map: MutableStateMap[str] = {} - def create_event_dict(etype: str, content: JsonDict, **kwargs: Any) -> JsonDict: - e = {"type": etype, "content": content} - - e.update(event_keys) - e.update(kwargs) - - return e - async def create_event( etype: str, content: JsonDict, @@ -1102,7 +1103,10 @@ async def create_event( nonlocal depth nonlocal prev_event - event_dict = create_event_dict(etype, content, **kwargs) + # Create the event dictionary. + event_dict = {"type": etype, "content": content} + event_dict.update(event_keys) + event_dict.update(kwargs) ( new_event, @@ -1139,7 +1143,9 @@ async def create_event( 400, f"'{preset_config}' is not a valid preset", errcode=Codes.BAD_JSON ) - creation_content.update({"creator": creator_id}) + # MSC2175 removes the creator field from the create event. 
+ if not room_version.msc2175_implicit_room_creator: + creation_content["creator"] = creator_id creation_event, unpersisted_creation_context = await create_event( EventTypes.Create, creation_content, False ) diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 509c5578895b..ec317e60239a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -96,7 +96,7 @@ def __init__(self, hs: "HomeServer"): self.member_as_limiter = Linearizer(max_count=10, name="member_as_limiter") self.clock = hs.get_clock() - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.third_party_event_rules = hs.get_third_party_event_rules() self._server_notices_mxid = self.config.servernotices.server_notices_mxid self._enable_lookup = hs.config.registration.enable_3pid_lookup @@ -806,7 +806,7 @@ async def update_membership_locked( ) block_invite_result = (Codes.FORBIDDEN, {}) - spam_check = await self.spam_checker.user_may_invite( + spam_check = await self._spam_checker_module_callbacks.user_may_invite( requester.user.to_string(), target_id, room_id ) if spam_check != NOT_SPAM: @@ -850,63 +850,68 @@ async def update_membership_locked( # `is_partial_state_room` also indicates whether `partial_state_before_join` is # partial. - # TODO: Refactor into dictionary of explicitly allowed transitions - # between old and new state, with specific error messages for some - # transitions and generic otherwise - old_state_id = partial_state_before_join.get( - (EventTypes.Member, target.to_string()) - ) - if old_state_id: - old_state = await self.store.get_event(old_state_id, allow_none=True) - old_membership = old_state.content.get("membership") if old_state else None - if action == "unban" and old_membership != "ban": - raise SynapseError( - 403, - "Cannot unban user who was not banned" - " (membership=%s)" % old_membership, - errcode=Codes.BAD_STATE, - ) - if old_membership == "ban" and action not in ["ban", "unban", "leave"]: - raise SynapseError( - 403, - "Cannot %s user who was banned" % (action,), - errcode=Codes.BAD_STATE, - ) - - if old_state: - same_content = content == old_state.content - same_membership = old_membership == effective_membership_state - same_sender = requester.user.to_string() == old_state.sender - if same_sender and same_membership and same_content: - # duplicate event. - # we know it was persisted, so must have a stream ordering. - assert old_state.internal_metadata.stream_ordering - return ( - old_state.event_id, - old_state.internal_metadata.stream_ordering, - ) + is_host_in_room = await self._is_host_in_room(partial_state_before_join) - if old_membership in ["ban", "leave"] and action == "kick": - raise AuthError(403, "The target user is not in the room") + # if we are not in the room, we won't have the current state + if is_host_in_room: + # TODO: Refactor into dictionary of explicitly allowed transitions + # between old and new state, with specific error messages for some + # transitions and generic otherwise + old_state_id = partial_state_before_join.get( + (EventTypes.Member, target.to_string()) + ) - # we don't allow people to reject invites to the server notice - # room, but they can leave it once they are joined. 
- if ( - old_membership == Membership.INVITE - and effective_membership_state == Membership.LEAVE - ): - is_blocked = await self.store.is_server_notice_room(room_id) - if is_blocked: + if old_state_id: + old_state = await self.store.get_event(old_state_id, allow_none=True) + old_membership = ( + old_state.content.get("membership") if old_state else None + ) + if action == "unban" and old_membership != "ban": + raise SynapseError( + 403, + "Cannot unban user who was not banned" + " (membership=%s)" % old_membership, + errcode=Codes.BAD_STATE, + ) + if old_membership == "ban" and action not in ["ban", "unban", "leave"]: raise SynapseError( - HTTPStatus.FORBIDDEN, - "You cannot reject this invite", - errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM, + 403, + "Cannot %s user who was banned" % (action,), + errcode=Codes.BAD_STATE, ) - else: - if action == "kick": - raise AuthError(403, "The target user is not in the room") - is_host_in_room = await self._is_host_in_room(partial_state_before_join) + if old_state: + same_content = content == old_state.content + same_membership = old_membership == effective_membership_state + same_sender = requester.user.to_string() == old_state.sender + if same_sender and same_membership and same_content: + # duplicate event. + # we know it was persisted, so must have a stream ordering. + assert old_state.internal_metadata.stream_ordering + return ( + old_state.event_id, + old_state.internal_metadata.stream_ordering, + ) + + if old_membership in ["ban", "leave"] and action == "kick": + raise AuthError(403, "The target user is not in the room") + + # we don't allow people to reject invites to the server notice + # room, but they can leave it once they are joined. + if ( + old_membership == Membership.INVITE + and effective_membership_state == Membership.LEAVE + ): + is_blocked = await self.store.is_server_notice_room(room_id) + if is_blocked: + raise SynapseError( + HTTPStatus.FORBIDDEN, + "You cannot reject this invite", + errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM, + ) + else: + if action == "kick": + raise AuthError(403, "The target user is not in the room") if effective_membership_state == Membership.JOIN: if requester.is_guest: @@ -935,8 +940,10 @@ async def update_membership_locked( # a room then they're allowed to join it. and not new_room ): - spam_check = await self.spam_checker.user_may_join_room( - target.to_string(), room_id, is_invited=inviter is not None + spam_check = ( + await self._spam_checker_module_callbacks.user_may_join_room( + target.to_string(), room_id, is_invited=inviter is not None + ) ) if spam_check != NOT_SPAM: raise SynapseError( @@ -1545,11 +1552,13 @@ async def do_3pid_invite( ) else: # Check if the spamchecker(s) allow this invite to go through. 
-            spam_check = await self.spam_checker.user_may_send_3pid_invite(
-                inviter_userid=requester.user.to_string(),
-                medium=medium,
-                address=address,
-                room_id=room_id,
+            spam_check = (
+                await self._spam_checker_module_callbacks.user_may_send_3pid_invite(
+                    inviter_userid=requester.user.to_string(),
+                    medium=medium,
+                    address=address,
+                    room_id=room_id,
+                )
             )
             if spam_check != NOT_SPAM:
                 raise SynapseError(
diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py
index 4a27c0f05142..c28325323c44 100644
--- a/synapse/handlers/sso.py
+++ b/synapse/handlers/sso.py
@@ -383,6 +383,7 @@ async def complete_sso_login_request(
         grandfather_existing_users: Callable[[], Awaitable[Optional[str]]],
         extra_login_attributes: Optional[JsonDict] = None,
         auth_provider_session_id: Optional[str] = None,
+        registration_enabled: bool = True,
     ) -> None:
         """
         Given an SSO ID, retrieve the user ID for it and possibly register the user.
@@ -435,6 +436,10 @@
             auth_provider_session_id: An optional session ID from the IdP.

+            registration_enabled: An optional boolean to enable/disable automatic
+                registrations of new users. If false and the user does not exist,
+                then the flow is aborted. Defaults to true.
+
         Raises:
             MappingException if there was a problem mapping the response to a user.
             RedirectException: if the mapping provider needs to redirect the user
@@ -462,8 +467,16 @@
                 auth_provider_id, remote_user_id, user_id
             )

-        # Otherwise, generate a new user.
-        if not user_id:
+        if not user_id and not registration_enabled:
+            logger.info(
+                "User does not exist and registrations are disabled for IdP '%s' and remote_user_id '%s'",
+                auth_provider_id,
+                remote_user_id,
+            )
+            raise MappingException(
+                "User does not exist and registrations are disabled"
+            )
+        elif not user_id:  # Otherwise, generate a new user.
             attributes = await self._call_attribute_mapper(sso_to_matrix_id_mapper)

             next_step_url = self._get_url_for_next_new_user_step(
diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py
index 7b14ce2d089e..e497aa946695 100644
--- a/synapse/handlers/sync.py
+++ b/synapse/handlers/sync.py
@@ -960,6 +960,8 @@ async def compute_state_delta(

         timeline_state = {}

+        # Membership events to fetch that can be found in the room state, or in
+        # the case of partial state rooms, the auth events of timeline events.
         members_to_fetch = set()
         first_event_by_sender_map = {}
         for event in batch.events:
@@ -981,9 +983,19 @@
                 # (if we are) to fix https://github.com/vector-im/riot-web/issues/7209
                 # We only need to apply this on full state syncs given we disabled
                 # LL for incr syncs in #3840.
-                members_to_fetch.add(sync_config.user.to_string())
-
-                state_filter = StateFilter.from_lazy_load_member_list(members_to_fetch)
+                # We don't insert ourselves into `members_to_fetch`, because in some
+                # rare cases (an empty event batch with a now_token after the user's
+                # leave in a partial state room which another local user has
+                # joined), the room state will be missing our membership and there
+                # is no guarantee that our membership will be in the auth events of
+                # timeline events when the room is partial stated.
+                state_filter = StateFilter.from_lazy_load_member_list(
+                    members_to_fetch.union((sync_config.user.to_string(),))
+                )
+            else:
+                state_filter = StateFilter.from_lazy_load_member_list(
+                    members_to_fetch
+                )

             # We are happy to use partial state to compute the `/sync` response.
# Since partial state may not include the lazy-loaded memberships we @@ -1005,7 +1017,9 @@ async def compute_state_delta( # sync's timeline and the start of the current sync's timeline. # See the docstring above for details. state_ids: StateMap[str] - + # We need to know whether the state we fetch may be partial, so check + # whether the room is partial stated *before* fetching it. + is_partial_state_room = await self.store.is_partial_state_room(room_id) if full_state: if batch: state_at_timeline_end = ( @@ -1136,7 +1150,7 @@ async def compute_state_delta( # If we only have partial state for the room, `state_ids` may be missing the # memberships we wanted. We attempt to find some by digging through the auth # events of timeline events. - if lazy_load_members and await self.store.is_partial_state_room(room_id): + if lazy_load_members and is_partial_state_room: assert members_to_fetch is not None assert first_event_by_sender_map is not None diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 77bdf0390154..932eb5e3567a 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -52,6 +52,11 @@ class RoomMember: FEDERATION_PING_INTERVAL = 40 * 1000 +# How long to remember a typing notification happened in a room before +# forgetting about it. +FORGET_TIMEOUT = 10 * 60 * 1000 + + class FollowerTypingHandler: """A typing handler on a different process than the writer that is updated via replication. @@ -83,7 +88,10 @@ def __init__(self, hs: "HomeServer"): self.wheel_timer: WheelTimer[RoomMember] = WheelTimer(bucket_size=5000) self._latest_room_serial = 0 + self._rooms_updated: Set[str] = set() + self.clock.looping_call(self._handle_timeouts, 5000) + self.clock.looping_call(self._prune_old_typing, FORGET_TIMEOUT) def _reset(self) -> None: """Reset the typing handler's data caches.""" @@ -92,6 +100,8 @@ def _reset(self) -> None: # map room IDs to sets of users currently typing self._room_typing = {} + self._rooms_updated = set() + self._member_last_federation_poke = {} self.wheel_timer = WheelTimer(bucket_size=5000) @@ -178,6 +188,7 @@ def process_replication_rows( prev_typing = self._room_typing.get(row.room_id, set()) now_typing = set(row.user_ids) self._room_typing[row.room_id] = now_typing + self._rooms_updated.add(row.room_id) if self.federation: run_as_background_process( @@ -209,6 +220,19 @@ async def _send_changes_in_typing_to_remotes( def get_current_token(self) -> int: return self._latest_room_serial + def _prune_old_typing(self) -> None: + """Prune rooms that haven't seen typing updates since last time. + + This is safe to do as clients should time out old typing notifications. 
+ """ + stale_rooms = self._room_serials.keys() - self._rooms_updated + + for room_id in stale_rooms: + self._room_serials.pop(room_id, None) + self._room_typing.pop(room_id, None) + + self._rooms_updated = set() + class TypingWriterHandler(FollowerTypingHandler): def __init__(self, hs: "HomeServer"): @@ -388,6 +412,7 @@ def _push_update_local(self, member: RoomMember, typing: bool) -> None: self._typing_stream_change_cache.entity_has_changed( member.room_id, self._latest_room_serial ) + self._rooms_updated.add(member.room_id) self.notifier.on_new_event( StreamKeyType.TYPING, self._latest_room_serial, rooms=[member.room_id] diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index 28a92d41d6fc..05197edc9546 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -94,7 +94,7 @@ def __init__(self, hs: "HomeServer"): self.is_mine_id = hs.is_mine_id self.update_user_directory = hs.config.worker.should_update_user_directory self.search_all_users = hs.config.userdirectory.user_directory_search_all_users - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self._hs = hs # The current position in the current_state_delta stream @@ -149,7 +149,9 @@ async def search_users( # Remove any spammy users from the results. non_spammy_users = [] for user in results["results"]: - if not await self.spam_checker.check_username_for_spam(user): + if not await self._spam_checker_module_callbacks.check_username_for_spam( + user + ): non_spammy_users.append(user) results["results"] = non_spammy_users diff --git a/synapse/http/client.py b/synapse/http/client.py index d45ac94a047b..91fe474f36d9 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -312,40 +312,31 @@ def request( ) -class SimpleHttpClient: +class BaseHttpClient: """ A simple, no-frills HTTP client with methods that wrap up common ways of - using HTTP in Matrix + using HTTP in Matrix. Does not come with a default Agent, subclasses will need to + define their own. + + Args: + hs: The HomeServer instance to pass in + treq_args: Extra keyword arguments to be given to treq.request. """ + agent: IAgent + def __init__( self, hs: "HomeServer", treq_args: Optional[Dict[str, Any]] = None, - ip_whitelist: Optional[IPSet] = None, - ip_blacklist: Optional[IPSet] = None, - use_proxy: bool = False, - user_agent: Optional[str] = None, ): - """ - Args: - hs - treq_args: Extra keyword arguments to be given to treq.request. - ip_blacklist: The IP addresses that are blacklisted that - we may not request. - ip_whitelist: The whitelisted IP addresses, that we can - request if it were otherwise caught in a blacklist. - use_proxy: Whether proxy settings should be discovered and used - from conventional environment variables. - """ self.hs = hs + self.reactor = hs.get_reactor() - self._ip_whitelist = ip_whitelist - self._ip_blacklist = ip_blacklist self._extra_treq_args = treq_args or {} self.clock = hs.get_clock() - user_agent = user_agent or hs.version_string + user_agent = hs.version_string if hs.config.server.user_agent_suffix: user_agent = "%s %s" % ( user_agent, @@ -357,44 +348,6 @@ def __init__( # reactor. self._cooperator = Cooperator(scheduler=_make_scheduler(hs.get_reactor())) - if self._ip_blacklist: - # If we have an IP blacklist, we need to use a DNS resolver which - # filters out blacklisted IP addresses, to prevent DNS rebinding. 
- self.reactor: ISynapseReactor = BlacklistingReactorWrapper( - hs.get_reactor(), self._ip_whitelist, self._ip_blacklist - ) - else: - self.reactor = hs.get_reactor() - - # the pusher makes lots of concurrent SSL connections to sygnal, and - # tends to do so in batches, so we need to allow the pool to keep - # lots of idle connections around. - pool = HTTPConnectionPool(self.reactor) - # XXX: The justification for using the cache factor here is that larger instances - # will need both more cache and more connections. - # Still, this should probably be a separate dial - pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5) - pool.cachedConnectionTimeout = 2 * 60 - - self.agent: IAgent = ProxyAgent( - self.reactor, - hs.get_reactor(), - connectTimeout=15, - contextFactory=self.hs.get_http_client_context_factory(), - pool=pool, - use_proxy=use_proxy, - ) - - if self._ip_blacklist: - # If we have an IP blacklist, we then install the blacklisting Agent - # which prevents direct access to IP addresses, that are not caught - # by the DNS resolution. - self.agent = BlacklistingAgentWrapper( - self.agent, - ip_blacklist=self._ip_blacklist, - ip_whitelist=self._ip_whitelist, - ) - async def request( self, method: str, @@ -800,6 +753,72 @@ async def get_file( ) +class SimpleHttpClient(BaseHttpClient): + """ + An HTTP client capable of crossing a proxy and respecting a block/allow list. + + This also configures a larger / longer lasting HTTP connection pool. + + Args: + hs: The HomeServer instance to pass in + treq_args: Extra keyword arguments to be given to treq.request. + ip_blacklist: The IP addresses that are blacklisted that + we may not request. + ip_whitelist: The whitelisted IP addresses, that we can + request if it were otherwise caught in a blacklist. + use_proxy: Whether proxy settings should be discovered and used + from conventional environment variables. + """ + + def __init__( + self, + hs: "HomeServer", + treq_args: Optional[Dict[str, Any]] = None, + ip_whitelist: Optional[IPSet] = None, + ip_blacklist: Optional[IPSet] = None, + use_proxy: bool = False, + ): + super().__init__(hs, treq_args=treq_args) + self._ip_whitelist = ip_whitelist + self._ip_blacklist = ip_blacklist + + if self._ip_blacklist: + # If we have an IP blacklist, we need to use a DNS resolver which + # filters out blacklisted IP addresses, to prevent DNS rebinding. + self.reactor: ISynapseReactor = BlacklistingReactorWrapper( + self.reactor, self._ip_whitelist, self._ip_blacklist + ) + + # the pusher makes lots of concurrent SSL connections to Sygnal, and tends to + # do so in batches, so we need to allow the pool to keep lots of idle + # connections around. + pool = HTTPConnectionPool(self.reactor) + # XXX: The justification for using the cache factor here is that larger + # instances will need both more cache and more connections. + # Still, this should probably be a separate dial + pool.maxPersistentPerHost = max(int(100 * hs.config.caches.global_factor), 5) + pool.cachedConnectionTimeout = 2 * 60 + + self.agent: IAgent = ProxyAgent( + self.reactor, + hs.get_reactor(), + connectTimeout=15, + contextFactory=self.hs.get_http_client_context_factory(), + pool=pool, + use_proxy=use_proxy, + ) + + if self._ip_blacklist: + # If we have an IP blacklist, we then install the blacklisting Agent + # which prevents direct access to IP addresses, that are not caught + # by the DNS resolution. 
+ self.agent = BlacklistingAgentWrapper( + self.agent, + ip_blacklist=self._ip_blacklist, + ip_whitelist=self._ip_whitelist, + ) + + def _timeout_to_request_timed_out_error(f: Failure) -> Failure: if f.check(twisted_error.TimeoutError, twisted_error.ConnectingCancelledError): # The TCP connection has its own timeout (set by the 'connectTimeout' param @@ -967,3 +986,42 @@ def getContext(self) -> SSL.Context: def creatorForNetloc(self, hostname: bytes, port: int) -> IOpenSSLContextFactory: return self + + +def is_unknown_endpoint( + e: HttpResponseException, synapse_error: Optional[SynapseError] = None +) -> bool: + """ + Returns true if the response was due to an endpoint being unimplemented. + + Args: + e: The error response received from the remote server. + synapse_error: The above error converted to a SynapseError. This is + automatically generated if not provided. + + """ + if synapse_error is None: + synapse_error = e.to_synapse_error() + + # Matrix v1.6 specifies that servers should return a 404 or 405 with an errcode + # of M_UNRECOGNIZED when they receive a request to an unknown endpoint or + # to an unknown method, respectively. + # + # Older versions of servers don't return proper errors, so be graceful. But, + # also handle that some endpoints truly do return 404 errors. + return ( + # 404 is an unknown endpoint, 405 is a known endpoint, but unknown method. + (e.code == 404 or e.code == 405) + and ( + # Consider empty body or non-JSON bodies to be unrecognised (matches + # older Dendrites & Conduits). + not e.response + or not e.response.startswith(b"{") + # The proper response JSON with M_UNRECOGNIZED errcode. + or synapse_error.errcode == Codes.UNRECOGNIZED + ) + ) or ( + # Older Synapses returned a 400 error. + e.code == 400 + and synapse_error.errcode == Codes.UNRECOGNIZED + ) diff --git a/synapse/http/server.py b/synapse/http/server.py index 7b760505b25a..101dc2e747d0 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -46,6 +46,13 @@ from twisted.internet.defer import CancelledError from twisted.python import failure from twisted.web import resource + +try: + from twisted.web.pages import notFound +except ImportError: + from twisted.web.resource import NoResource as notFound # type: ignore[assignment] + +from twisted.web.resource import IResource from twisted.web.server import NOT_DONE_YET, Request from twisted.web.static import File from twisted.web.util import redirectTo @@ -569,6 +576,9 @@ def render_GET(self, request: Request) -> bytes: set_clickjacking_protection_headers(request) return super().render_GET(request) + def directoryListing(self) -> IResource: + return notFound() + class UnrecognizedRequestResource(resource.Resource): """ diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index 0070bd2940ea..fc6279362881 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -778,17 +778,13 @@ def parse_json_object_from_request( Model = TypeVar("Model", bound=BaseModel) -def parse_and_validate_json_object_from_request( - request: Request, model_type: Type[Model] -) -> Model: - """Parse a JSON object from the body of a twisted HTTP request, then deserialise and - validate using the given pydantic model. +def validate_json_object(content: JsonDict, model_type: Type[Model]) -> Model: + """Validate a deserialized JSON object using the given pydantic model. Raises: SynapseError if the request body couldn't be decoded as JSON or if it wasn't a JSON object. 
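The new `is_unknown_endpoint` helper centralises the heuristic for "does the remote implement this endpoint at all?". A hedged sketch of the call pattern it is built for, falling back from a stable prefix to an unstable one; the endpoint paths are placeholders and `client` stands in for the federation HTTP client, so treat the signature as schematic:

async def get_json_with_fallback(client, destination: str) -> JsonDict:
    try:
        # Try the stable endpoint first.
        return await client.get_json(destination, "/_matrix/federation/v1/example")
    except HttpResponseException as e:
        # Only fall back when the remote signals an unimplemented endpoint;
        # genuine errors propagate to the caller.
        if not is_unknown_endpoint(e):
            raise
        return await client.get_json(destination, "/_matrix/federation/unstable/example")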
""" - content = parse_json_object_from_request(request, allow_empty_body=False) try: instance = model_type.parse_obj(content) except ValidationError as e: @@ -811,6 +807,20 @@ def parse_and_validate_json_object_from_request( return instance +def parse_and_validate_json_object_from_request( + request: Request, model_type: Type[Model] +) -> Model: + """Parse a JSON object from the body of a twisted HTTP request, then deserialise and + validate using the given pydantic model. + + Raises: + SynapseError if the request body couldn't be decoded as JSON or + if it wasn't a JSON object. + """ + content = parse_json_object_from_request(request, allow_empty_body=False) + return validate_json_object(content, model_type) + + def assert_params_in_dict(body: JsonDict, required: Iterable[str]) -> None: absent = [] for k in required: diff --git a/synapse/http/site.py b/synapse/http/site.py index 3a4d91f60b8a..3a35bfdc0192 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -19,6 +19,7 @@ import attr from zope.interface import implementer +from twisted.internet.address import UNIXAddress from twisted.internet.defer import Deferred from twisted.internet.interfaces import IAddress, IReactorTime from twisted.python.failure import Failure @@ -257,7 +258,7 @@ def render(self, resrc: Resource) -> None: request_id, request=ContextRequest( request_id=request_id, - ip_address=self.getClientAddress().host, + ip_address=self.get_client_ip_if_available(), site_tag=self.synapse_site.site_tag, # The requester is going to be unknown at this point. requester=None, @@ -415,7 +416,7 @@ def _started_processing(self, servlet_name: str) -> None: self.synapse_site.access_logger.debug( "%s - %s - Received request: %s %s", - self.getClientAddress().host, + self.get_client_ip_if_available(), self.synapse_site.site_tag, self.get_method(), self.get_redacted_uri(), @@ -463,7 +464,7 @@ def _finished_processing(self) -> None: "%s - %s - {%s}" " Processed request: %.3fsec/%.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)" ' %sB %s "%s %s %s" "%s" [%d dbevts]', - self.getClientAddress().host, + self.get_client_ip_if_available(), self.synapse_site.site_tag, requester, processing_time, @@ -501,6 +502,26 @@ def _should_log_request(self) -> bool: return True + def get_client_ip_if_available(self) -> str: + """Logging helper. Return something useful when a client IP is not retrievable + from a unix socket. + + In practice, this returns the socket file path on a SynapseRequest if using a + unix socket and the normal IP address for TCP sockets. + + """ + # getClientAddress().host returns a proper IP address for a TCP socket. But + # unix sockets have no concept of IP addresses or ports and return a + # UNIXAddress containing a 'None' value. In order to get something usable for + # logs(where this is used) get the unix socket file. getHost() returns a + # UNIXAddress containing a value of the socket file and has an instance + # variable of 'name' encoded as a byte string containing the path we want. + # Decode to utf-8 so it looks nice. 
+ if isinstance(self.getClientAddress(), UNIXAddress): + return self.getHost().name.decode("utf-8") + else: + return self.getClientAddress().host + class XForwardedForRequest(SynapseRequest): """Request object which honours proxy headers diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index a7e22a91e115..a819d954070a 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -36,7 +36,6 @@ from twisted.internet.interfaces import IConsumer from twisted.protocols.basic import FileSender -import synapse from synapse.api.errors import NotFoundError from synapse.logging.context import defer_to_thread, make_deferred_yieldable from synapse.util import Clock @@ -74,7 +73,7 @@ def __init__( self.local_media_directory = local_media_directory self.filepaths = filepaths self.storage_providers = storage_providers - self.spam_checker = hs.get_spam_checker() + self._spam_checker_module_callbacks = hs.get_module_api_callbacks().spam_checker self.clock = hs.get_clock() async def store_file(self, source: IO, file_info: FileInfo) -> str: @@ -145,10 +144,10 @@ async def finish() -> None: f.flush() f.close() - spam_check = await self.spam_checker.check_media_file_for_spam( + spam_check = await self._spam_checker_module_callbacks.check_media_file_for_spam( ReadableFileWrapper(self.clock, fname), file_info ) - if spam_check != synapse.module_api.NOT_SPAM: + if spam_check != self._spam_checker_module_callbacks.NOT_SPAM: logger.info("Blocking media due to spam checker") # Note that we'll delete the stored media, due to the # try/except below. The media also won't be stored in diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 595c23e78d7e..eeafea74d15c 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -44,20 +44,6 @@ GET_USERS_FOR_STATES_CALLBACK, PresenceRouter, ) -from synapse.events.spamcheck import ( - CHECK_EVENT_FOR_SPAM_CALLBACK, - CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK, - CHECK_REGISTRATION_FOR_SPAM_CALLBACK, - CHECK_USERNAME_FOR_SPAM_CALLBACK, - SHOULD_DROP_FEDERATED_EVENT_CALLBACK, - USER_MAY_CREATE_ROOM_ALIAS_CALLBACK, - USER_MAY_CREATE_ROOM_CALLBACK, - USER_MAY_INVITE_CALLBACK, - USER_MAY_JOIN_ROOM_CALLBACK, - USER_MAY_PUBLISH_ROOM_CALLBACK, - USER_MAY_SEND_3PID_INVITE_CALLBACK, - SpamChecker, -) from synapse.events.third_party_rules import ( CHECK_CAN_DEACTIVATE_USER_CALLBACK, CHECK_CAN_SHUTDOWN_ROOM_CALLBACK, @@ -105,6 +91,20 @@ ON_LEGACY_SEND_MAIL_CALLBACK, ON_USER_REGISTRATION_CALLBACK, ) +from synapse.module_api.callbacks.spamchecker_callbacks import ( + CHECK_EVENT_FOR_SPAM_CALLBACK, + CHECK_MEDIA_FILE_FOR_SPAM_CALLBACK, + CHECK_REGISTRATION_FOR_SPAM_CALLBACK, + CHECK_USERNAME_FOR_SPAM_CALLBACK, + SHOULD_DROP_FEDERATED_EVENT_CALLBACK, + USER_MAY_CREATE_ROOM_ALIAS_CALLBACK, + USER_MAY_CREATE_ROOM_CALLBACK, + USER_MAY_INVITE_CALLBACK, + USER_MAY_JOIN_ROOM_CALLBACK, + USER_MAY_PUBLISH_ROOM_CALLBACK, + USER_MAY_SEND_3PID_INVITE_CALLBACK, + SpamCheckerModuleApiCallbacks, +) from synapse.rest.client.login import LoginResponse from synapse.storage import DataStore from synapse.storage.background_updates import ( @@ -147,7 +147,7 @@ """ PRESENCE_ALL_USERS = PresenceRouter.ALL_USERS -NOT_SPAM = SpamChecker.NOT_SPAM +NOT_SPAM = SpamCheckerModuleApiCallbacks.NOT_SPAM __all__ = [ "errors", @@ -271,7 +271,6 @@ def __init__(self, hs: "HomeServer", auth_handler: AuthHandler) -> None: self._public_room_list_manager = PublicRoomListManager(hs) self._account_data_manager = AccountDataManager(hs) - 
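These spam-checker hunks (here, and in `user_directory.py` and `media_storage.py` above) only move Synapse's internal wiring; the module-facing registration API is unchanged, as the `register_spam_checker_callbacks` hunk below shows. A minimal sketch of a module using it, with invented names and a deliberately crude rule:

class ExampleSpamChecker:
    def __init__(self, config: dict, api) -> None:
        # `api` is the ModuleApi instance handed to every loaded module.
        api.register_spam_checker_callbacks(
            check_username_for_spam=self.check_username_for_spam,
        )

    async def check_username_for_spam(self, user_profile: dict) -> bool:
        # user_profile carries user_id / display_name / avatar_url.
        display_name = user_profile.get("display_name") or ""
        return "spam" in display_name.lower()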
self._spam_checker = hs.get_spam_checker() self._third_party_event_rules = hs.get_third_party_event_rules() self._password_auth_provider = hs.get_password_auth_provider() self._presence_router = hs.get_presence_router() @@ -305,7 +304,7 @@ def register_spam_checker_callbacks( Added in Synapse v1.37.0. """ - return self._spam_checker.register_callbacks( + return self._callbacks.spam_checker.register_callbacks( check_event_for_spam=check_event_for_spam, should_drop_federated_event=should_drop_federated_event, user_may_join_room=user_may_join_room, diff --git a/synapse/module_api/callbacks/__init__.py b/synapse/module_api/callbacks/__init__.py index 3d977bf6558a..5cdb2c003a7c 100644 --- a/synapse/module_api/callbacks/__init__.py +++ b/synapse/module_api/callbacks/__init__.py @@ -12,11 +12,20 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from synapse.server import HomeServer + from synapse.module_api.callbacks.account_validity_callbacks import ( AccountValidityModuleApiCallbacks, ) +from synapse.module_api.callbacks.spamchecker_callbacks import ( + SpamCheckerModuleApiCallbacks, +) class ModuleApiCallbacks: - def __init__(self) -> None: + def __init__(self, hs: "HomeServer") -> None: self.account_validity = AccountValidityModuleApiCallbacks() + self.spam_checker = SpamCheckerModuleApiCallbacks(hs) diff --git a/synapse/events/spamcheck.py b/synapse/module_api/callbacks/spamchecker_callbacks.py similarity index 99% rename from synapse/events/spamcheck.py rename to synapse/module_api/callbacks/spamchecker_callbacks.py index 765c15bb5135..4456d1b81eb2 100644 --- a/synapse/events/spamcheck.py +++ b/synapse/module_api/callbacks/spamchecker_callbacks.py @@ -286,11 +286,10 @@ def run(*args: Any, **kwargs: Any) -> Awaitable: api.register_spam_checker_callbacks(**hooks) -class SpamChecker: +class SpamCheckerModuleApiCallbacks: NOT_SPAM: Literal["NOT_SPAM"] = "NOT_SPAM" def __init__(self, hs: "synapse.server.HomeServer") -> None: - self.hs = hs self.clock = hs.get_clock() self._check_event_for_spam_callbacks: List[CHECK_EVENT_FOR_SPAM_CALLBACK] = [] diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index a0c760239db8..9e3a98741a4f 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -103,7 +103,7 @@ class PusherConfig: id: Optional[str] user_name: str - access_token: Optional[int] + profile_tag: str kind: str app_id: str @@ -119,6 +119,11 @@ class PusherConfig: enabled: bool device_id: Optional[str] + # XXX(quenting): The access_token is not persisted anymore for new pushers, but we + # keep it when reading from the database, so that we don't get stale pushers + # while the "set_device_id_for_pushers" background update is running. 
+ access_token: Optional[int] + def as_dict(self) -> Dict[str, Any]: """Information that can be retrieved about a pusher after creation.""" return { diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 93b255ced579..491a09b71d54 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -149,7 +149,7 @@ async def send_password_reset_mail( await self.send_email( email_address, self.email_subjects.password_reset - % {"server_name": self.hs.config.server.server_name}, + % {"server_name": self.hs.config.server.server_name, "app": self.app_name}, template_vars, ) diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index e2648cbc93c9..6517e3566fae 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -25,7 +25,7 @@ from synapse.push import Pusher, PusherConfig, PusherConfigException from synapse.push.pusher import PusherFactory from synapse.replication.http.push import ReplicationRemovePusherRestServlet -from synapse.types import JsonDict, RoomStreamToken +from synapse.types import JsonDict, RoomStreamToken, StrCollection from synapse.util.async_helpers import concurrently_execute from synapse.util.threepids import canonicalise_email @@ -97,7 +97,6 @@ def start(self) -> None: async def add_or_update_pusher( self, user_id: str, - access_token: Optional[int], kind: str, app_id: str, app_display_name: str, @@ -128,6 +127,22 @@ async def add_or_update_pusher( # stream ordering, so it will process pushes from this point onwards. last_stream_ordering = self.store.get_room_max_stream_ordering() + # Before we actually persist the pusher, we check if the user already has one + # for this app ID and pushkey. If so, we want to keep the access token and + # device ID in place, since this could be one device modifying + # (e.g. enabling/disabling) another device's pusher. + # XXX(quenting): Even though we're not persisting the access_token_id for new + # pushers anymore, we still need to copy existing access_token_ids over when + # updating a pusher, in case the "set_device_id_for_pushers" background update + # hasn't run yet. + access_token_id = None + existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey( + user_id, app_id, pushkey + ) + if existing_config: + device_id = existing_config.device_id + access_token_id = existing_config.access_token + # we try to create the pusher just to validate the config: it # will then get pulled out of the database, # recreated, added and started: this means we have only one @@ -136,7 +151,6 @@ async def add_or_update_pusher( PusherConfig( id=None, user_name=user_id, - access_token=access_token, profile_tag=profile_tag, kind=kind, app_id=app_id, @@ -151,23 +165,12 @@ async def add_or_update_pusher( failing_since=None, enabled=enabled, device_id=device_id, + access_token=access_token_id, ) ) - # Before we actually persist the pusher, we check if the user already has one - # this app ID and pushkey. If so, we want to keep the access token and device ID - # in place, since this could be one device modifying (e.g. enabling/disabling) - # another device's pusher. 
- existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey( - user_id, app_id, pushkey - ) - if existing_config: - access_token = existing_config.access_token - device_id = existing_config.device_id - await self.store.add_pusher( user_id=user_id, - access_token=access_token, kind=kind, app_id=app_id, app_display_name=app_display_name, @@ -180,6 +183,7 @@ async def add_or_update_pusher( profile_tag=profile_tag, enabled=enabled, device_id=device_id, + access_token_id=access_token_id, ) pusher = await self.process_pusher_change_by_id(app_id, pushkey, user_id) @@ -199,7 +203,7 @@ async def remove_pushers_by_app_id_and_pushkey_not_user( ) await self.remove_pusher(p.app_id, p.pushkey, p.user_name) - async def remove_pushers_by_access_token( + async def remove_pushers_by_access_tokens( self, user_id: str, access_tokens: Iterable[int] ) -> None: """Remove the pushers for a given user corresponding to a set of @@ -209,6 +213,8 @@ async def remove_pushers_by_access_token( user_id: user to remove pushers for access_tokens: access token *ids* to remove pushers for """ + # XXX(quenting): This is only needed until the "set_device_id_for_pushers" + # background update finishes tokens = set(access_tokens) for p in await self.store.get_pushers_by_user_id(user_id): if p.access_token in tokens: @@ -220,6 +226,26 @@ async def remove_pushers_by_access_token( ) await self.remove_pusher(p.app_id, p.pushkey, p.user_name) + async def remove_pushers_by_devices( + self, user_id: str, devices: StrCollection + ) -> None: + """Remove the pushers for a given user corresponding to a set of devices + + Args: + user_id: user to remove pushers for + devices: device IDs to remove pushers for + """ + device_ids = set(devices) + for p in await self.store.get_pushers_by_user_id(user_id): + if p.device_id in device_ids: + logger.info( + "Removing pusher for app id %s, pushkey %s, user %s", + p.app_id, + p.pushkey, + p.user_name, + ) + await self.remove_pusher(p.app_id, p.pushkey, p.user_name) + def on_new_notifications(self, max_token: RoomStreamToken) -> None: if not self.pushers: # nothing to do here. diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index c20d9c7e9da7..8c2c54c07a49 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -345,7 +345,7 @@ async def send_request(*, instance_name: str = "master", **kwargs: Any) -> Any: _outgoing_request_counter.labels(cls.NAME, 200).inc() # Wait on any streams that the remote may have written to. - for stream_name, position in result.get( + for stream_name, position in result.pop( _STREAM_POSITION_KEY, {} ).items(): await replication.wait_for_stream_position( diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 56a5c21910d9..a7248d7b2e49 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -14,36 +14,7 @@ """This module contains the implementation of both the client and server protocols. -The basic structure of the protocol is line based, where the initial word of -each line specifies the command. The rest of the line is parsed based on the -command. For example, the `RDATA` command is defined as:: - - RDATA - -(Note that `` may contains spaces, but cannot contain newlines.) - -Blank lines are ignored. - -# Example - -An example iteraction is shown below. 
Each line is prefixed with '>' or '<' to
-indicate which side is sending, these are *not* included on the wire::
-
-    * connection established *
-    > SERVER localhost:8823
-    > PING 1490197665618
-    < NAME synapse.app.appservice
-    < PING 1490197665618
-    < REPLICATE
-    > POSITION events 1
-    > POSITION backfill 1
-    > POSITION caches 1
-    > RDATA caches 2 ["get_user_by_id",["@01register-user:localhost:8823"],1490197670513]
-    > RDATA events 14 ["ev", ["$149019767112vOHxz:localhost:8823",
-    "!AFDCvgApUmpdfVjIXm:localhost:8823","m.room.guest_access","",null]]
-    < PING 1490197675618
-    > ERROR server stopping
-    * connection closed by server *
+An explanation of this protocol is available in docs/tcp_replication.md
 """
import fcntl
import logging
diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py
index a4bdb48c0c99..c6088a0f99f8 100644
--- a/synapse/replication/tcp/streams/_base.py
+++ b/synapse/replication/tcp/streams/_base.py
@@ -152,8 +152,8 @@ async def get_updates(self) -> StreamUpdateResult:
        Returns:
            A triplet `(updates, new_last_token, limited)`, where `updates` is
            a list of `(token, row)` entries, `new_last_token` is the new
-            position in stream, and `limited` is whether there are more updates
-            to fetch.
+            position in stream (ie the highest token returned in the updates),
+            and `limited` is whether there are more updates to fetch.
diff --git a/synapse/res/templates/recaptcha.html b/synapse/res/templates/recaptcha.html
index f00992a24be8..b80e5e8f2469 100644
--- a/synapse/res/templates/recaptcha.html
+++ b/synapse/res/templates/recaptcha.html
@@ -3,7 +3,11 @@
{% block header %}
[unrecoverable fragments: the recaptcha <script> markup changes, followed by the deletion of the registration fallback page, whose "Create account:" form held a username field, two password fields, a reCAPTCHA area, a feedback element, and a submit button]
- - diff --git a/synapse/static/client/register/js/jquery-3.4.1.min.js b/synapse/static/client/register/js/jquery-3.4.1.min.js deleted file mode 100644 index a1c07fd803b5..000000000000 --- a/synapse/static/client/register/js/jquery-3.4.1.min.js +++ /dev/null @@ -1,2 +0,0 @@ -/*! jQuery v3.4.1 | (c) JS Foundation and other contributors | jquery.org/license */ -!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],E=C.document,r=Object.getPrototypeOf,s=t.slice,g=t.concat,u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType},x=function(e){return null!=e&&e===e.window},c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.4.1",k=function(e,t){return new k.fn.init(e,t)},p=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;function d(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp($),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+$),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),ne=function(e,t,n){var r="0x"+t-65536;return r!=r||n?t:r<0?String.fromCharCode(r+65536):String.fromCharCode(r>>10|55296,1023&r|56320)},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(m.childNodes),m.childNodes),t[m.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&((e?e.ownerDocument||e:m)!==C&&T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!A[t+" 
"]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&U.test(t)){(s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=k),o=(l=h(t)).length;while(o--)l[o]="#"+s+" "+xe(l[o]);c=l.join(","),f=ee.test(t)&&ye(e.parentNode)||e}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){A(t,!0)}finally{s===k&&e.removeAttribute("id")}}}return g(t.replace(B,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[k]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e.namespaceURI,n=(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:m;return r!==C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),m!==C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=k,!C.getElementsByName||!C.getElementsByName(k).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return 
t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+k+"-]").length||v.push("~="),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+k+"+*").length||v.push(".#.+[+~]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",$)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},D=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)===(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e===C||e.ownerDocument===m&&y(m,e)?-1:t===C||t.ownerDocument===m&&y(m,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e===C?-1:t===C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]===m?-1:s[r]===m?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if((e.ownerDocument||e)!==C&&T(e),d.matchesSelector&&E&&!A[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){A(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=p[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&p(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof 
e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?k.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?k.grep(e,function(e){return e===n!==r}):"string"!=typeof n?k.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(k.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||q,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:L.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof k?t[0]:t,k.merge(this,k.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),D.test(r[1])&&k.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(k):k.makeArray(e,this)}).prototype=k.fn,q=k(E);var H=/^(?:parents|prev(?:Until|All))/,O={children:!0,contents:!0,next:!0,prev:!0};function P(e,t){while((e=e[t])&&1!==e.nodeType);return e}k.fn.extend({has:function(e){var t=k(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i,ge={option:[1,""],thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?k.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;nx",y.noCloneChecked=!!me.cloneNode(!0).lastChild.defaultValue;var Te=/^key/,Ce=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,Ee=/^([^.]*)(?:\.(.+)|)/;function ke(){return!0}function Se(){return!1}function Ne(e,t){return e===function(){try{return E.activeElement}catch(e){}}()==("focus"===t)}function Ae(e,t,n,r,i,o){var a,s;if("object"==typeof t){for(s in"string"!=typeof n&&(r=r||n,n=void 0),t)Ae(e,s,n,r,t[s],o);return e}if(null==r&&null==i?(i=n,r=n=void 0):null==i&&("string"==typeof n?(i=r,r=void 0):(i=r,r=n,n=void 0)),!1===i)i=Se;else if(!i)return e;return 1===o&&(a=i,(i=function(e){return k().off(e),a.apply(this,arguments)}).guid=a.guid||(a.guid=k.guid++)),e.each(function(){k.event.add(this,t,i,r,n)})}function De(e,i,o){o?(Q.set(e,i,!1),k.event.add(e,i,{namespace:!1,handler:function(e){var t,n,r=Q.get(this,i);if(1&e.isTrigger&&this[i]){if(r.length)(k.event.special[i]||{}).delegateType&&e.stopPropagation();else if(r=s.call(arguments),Q.set(this,i,r),t=o(this,i),this[i](),r!==(n=Q.get(this,i))||t?Q.set(this,i,!1):n={},r!==n)return e.stopImmediatePropagation(),e.preventDefault(),n.value}else r.length&&(Q.set(this,i,{value:k.event.trigger(k.extend(r[0],k.Event.prototype),r.slice(1),this)}),e.stopImmediatePropagation())}})):void 0===Q.get(e,i)&&k.event.add(e,i,ke)}k.event={global:{},add:function(t,e,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.get(t);if(v){n.handler&&(n=(o=n).handler,i=o.selector),i&&k.find.matchesSelector(ie,i),n.guid||(n.guid=k.guid++),(u=v.events)||(u=v.events={}),(a=v.handle)||(a=v.handle=function(e){return"undefined"!=typeof k&&k.event.triggered!==e.type?k.event.dispatch.apply(t,arguments):void 0}),l=(e=(e||"").match(R)||[""]).length;while(l--)d=g=(s=Ee.exec(e[l])||[])[1],h=(s[2]||"").split(".").sort(),d&&(f=k.event.special[d]||{},d=(i?f.delegateType:f.bindType)||d,f=k.event.special[d]||{},c=k.extend({type:d,origType:g,data:r,handler:n,guid:n.guid,selector:i,needsContext:i&&k.expr.match.needsContext.test(i),namespace:h.join(".")},o),(p=u[d])||((p=u[d]=[]).delegateCount=0,f.setup&&!1!==f.setup.call(t,r,h,a)||t.addEventListener&&t.addEventListener(d,a)),f.add&&(f.add.call(t,c),c.handler.guid||(c.handler.guid=n.guid)),i?p.splice(p.delegateCount++,0,c):p.push(c),k.event.global[d]=!0)}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,f,p,d,h,g,v=Q.hasData(e)&&Q.get(e);if(v&&(u=v.events)){l=(t=(t||"").match(R)||[""]).length;while(l--)if(d=g=(s=Ee.exec(t[l])||[])[1],h=(s[2]||"").split(".").sort(),d){f=k.event.special[d]||{},p=u[d=(r?f.delegateType:f.bindType)||d]||[],s=s[2]&&new RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),a=o=p.length;while(o--)c=p[o],!i&&g!==c.origType||n&&n.guid!==c.guid||s&&!s.test(c.namespace)||r&&r!==c.selector&&("**"!==r||!c.selector)||(p.splice(o,1),c.selector&&p.delegateCount--,f.remove&&f.remove.call(e,c));a&&!p.length&&(f.teardown&&!1!==f.teardown.call(e,h,v.handle)||k.removeEvent(e,d,v.handle),delete u[d])}else for(d in u)k.event.remove(e,d+t[l],n,r,!0);k.isEmptyObject(u)&&Q.remove(e,"handle events")}},dispatch:function(e){var t,n,r,i,o,a,s=k.event.fix(e),u=new Array(arguments.length),l=(Q.get(this,"events")||{})[s.type]||[],c=k.event.special[s.type]||{};for(u[0]=s,t=1;t\x20\t\r\n\f]*)[^>]*)\/>/gi,qe=/\s*$/g;function Oe(e,t){return 
A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&k(e).children("tbody")[0]||e}function Pe(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function Re(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Me(e,t){var n,r,i,o,a,s,u,l;if(1===t.nodeType){if(Q.hasData(e)&&(o=Q.access(e),a=Q.set(t,o),l=o.events))for(i in delete a.handle,a.events={},l)for(n=0,r=l[i].length;n")},clone:function(e,t,n){var r,i,o,a,s,u,l,c=e.cloneNode(!0),f=oe(e);if(!(y.noCloneChecked||1!==e.nodeType&&11!==e.nodeType||k.isXMLDoc(e)))for(a=ve(c),r=0,i=(o=ve(e)).length;r").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var Vt,Gt=[],Yt=/(=)\?(?=&|$)|\?\?/;k.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Gt.pop()||k.expando+"_"+kt++;return this[e]=!0,e}}),k.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Yt.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Yt.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Yt,"$1"+r):!1!==e.jsonp&&(e.url+=(St.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||k.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?k(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,Gt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((Vt=E.implementation.createHTMLDocument("").body).innerHTML="
",2===Vt.childNodes.length),k.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=D.exec(e))?[t.createElement(i[1])]:(i=we([e],t,o),o&&o.length&&k(o).remove(),k.merge([],i.childNodes)));var r,i,o},k.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(k.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},k.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){k.fn[t]=function(e){return this.on(t,e)}}),k.expr.pseudos.animated=function(t){return k.grep(k.timers,function(e){return t===e.elem}).length},k.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=k.css(e,"position"),c=k(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=k.css(e,"top"),u=k.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,k.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},k.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){k.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],i={top:0,left:0};if("fixed"===k.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===k.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=k(e).offset()).top+=k.css(e,"borderTopWidth",!0),i.left+=k.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-k.css(r,"marginTop",!0),left:t.left-i.left-k.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===k.css(e,"position"))e=e.offsetParent;return e||ie})}}),k.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;k.fn[t]=function(e){return _(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),k.each(["top","left"],function(e,n){k.cssHooks[n]=ze(y.pixelPosition,function(e,t){if(t)return t=_e(e,n),$e.test(t)?k(e).position()[n]+"px":t})}),k.each({Height:"height",Width:"width"},function(a,s){k.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){k.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return _(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?k.css(e,t,i):k.style(e,t,n,i)},s,n?e:void 0,n)}})}),k.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){k.fn[n]=function(e,t){return 0 "+JSON.stringify(response)); - 
submitPassword(user, pwd, response.session); - }).fail(function(err) { - Recaptcha.reload(); - errorFunc(err); - }); -}; - -var submitPassword = function(user, pwd, session) { - console.log("Registering..."); - var data = { - type: "m.login.password", - user: user, - password: pwd, - session: session - }; - $.post(matrixRegistration.endpoint, JSON.stringify(data), function(response) { - matrixRegistration.onRegistered( - response.home_server, response.user_id, response.access_token - ); - }).fail(errorFunc); -}; - -var errorFunc = function(err) { - if (err.responseJSON && err.responseJSON.error) { - setFeedbackString(err.responseJSON.error + " (" + err.responseJSON.errcode + ")"); - } - else { - setFeedbackString("Request failed: " + err.status); - } -}; - -var setFeedbackString = function(text) { - $("#feedback").text(text); -}; - -matrixRegistration.onLoad = function() { - setupCaptcha(); -}; - -matrixRegistration.signUp = function() { - var user = $("#desired_user_id").val(); - if (user.length == 0) { - setFeedbackString("Must specify a username."); - return; - } - var pwd1 = $("#pwd1").val(); - var pwd2 = $("#pwd2").val(); - if (pwd1.length < 6) { - setFeedbackString("Password: min. 6 characters."); - return; - } - if (pwd1 != pwd2) { - setFeedbackString("Passwords do not match."); - return; - } - if (window.matrixRegistration.isUsingRecaptcha) { - submitCaptcha(user, pwd1); - } - else { - submitPassword(user, pwd1); - } -}; - -matrixRegistration.onRegistered = function(hs_url, user_id, access_token) { - // clobber this function - console.warn("onRegistered - This function should be replaced to proceed."); -}; diff --git a/synapse/static/client/register/register_config.sample.js b/synapse/static/client/register/register_config.sample.js deleted file mode 100644 index c7ea180dee29..000000000000 --- a/synapse/static/client/register/register_config.sample.js +++ /dev/null @@ -1,3 +0,0 @@ -window.matrixRegistrationConfig = { - recaptcha_public_key: "YOUR_PUBLIC_KEY" -}; diff --git a/synapse/static/client/register/style.css b/synapse/static/client/register/style.css deleted file mode 100644 index 8a39b5d0f576..000000000000 --- a/synapse/static/client/register/style.css +++ /dev/null @@ -1,64 +0,0 @@ -html { - height: 100%; -} - -body { - height: 100%; - font-family: "Myriad Pro", "Myriad", Helvetica, Arial, sans-serif; - font-size: 12pt; - margin: 0px; -} - -h1 { - font-size: 20pt; -} - -a:link { color: #666; } -a:visited { color: #666; } -a:hover { color: #000; } -a:active { color: #000; } - -input { - width: 100% -} - -textarea, input { - font-family: inherit; - font-size: inherit; -} - -.smallPrint { - color: #888; - font-size: 9pt ! important; - font-style: italic ! 
important; -} - -#recaptcha_area { - margin: auto -} - -.g-recaptcha div { - margin: auto; -} - -#registrationForm { - text-align: left; - padding: 5px; - margin-bottom: 40px; - display: inline-block; - - -webkit-border-radius: 10px; - -moz-border-radius: 10px; - border-radius: 10px; - - -webkit-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15); - -moz-box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15); - box-shadow: 0px 0px 20px 0px rgba(0,0,0,0.15); - - background-color: #f8f8f8; - border: 1px #ccc solid; -} - -.error { - color: red; -} diff --git a/synapse/storage/database.py b/synapse/storage/database.py index fec4ae5b970d..1f5f5eb6f8c7 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -58,7 +58,7 @@ from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.background_updates import BackgroundUpdater from synapse.storage.engines import BaseDatabaseEngine, PostgresEngine, Sqlite3Engine -from synapse.storage.types import Connection, Cursor +from synapse.storage.types import Connection, Cursor, SQLQueryParameters from synapse.util.async_helpers import delay_cancellation from synapse.util.iterutils import batch_iter @@ -371,10 +371,18 @@ def execute_batch(self, sql: str, args: Iterable[Iterable[Any]]) -> None: if isinstance(self.database_engine, PostgresEngine): from psycopg2.extras import execute_batch + # TODO: is it safe for values to be Iterable[Iterable[Any]] here? + # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_batch + # suggests each arg in args should be a sequence or mapping self._do_execute( lambda the_sql: execute_batch(self.txn, the_sql, args), sql ) else: + # TODO: is it safe for values to be Iterable[Iterable[Any]] here? + # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#sqlite3.Cursor.executemany + # suggests that the outer collection may be iterable, but + # https://docs.python.org/3/library/sqlite3.html?highlight=sqlite3#how-to-use-placeholders-to-bind-values-in-sql-queries + # suggests that the inner collection should be a sequence or dict. self.executemany(sql, args) def execute_values( @@ -390,14 +398,20 @@ def execute_values( from psycopg2.extras import execute_values return self._do_execute( + # TODO: is it safe for values to be Iterable[Iterable[Any]] here? + # https://www.psycopg.org/docs/extras.html?highlight=execute_batch#psycopg2.extras.execute_values says values should be Sequence[Sequence] lambda the_sql: execute_values(self.txn, the_sql, values, fetch=fetch), sql, ) - def execute(self, sql: str, *args: Any) -> None: - self._do_execute(self.txn.execute, sql, *args) + def execute(self, sql: str, parameters: SQLQueryParameters = ()) -> None: + self._do_execute(self.txn.execute, sql, parameters) def executemany(self, sql: str, *args: Any) -> None: + # TODO: we should add a type for *args here. Looking at Cursor.executemany + # and DBAPI2 it ought to be Sequence[_Parameter], but we pass in + # Iterable[Iterable[Any]] in execute_batch and execute_values above, which mypy + # complains about. 
self._do_execute(self.txn.executemany, sql, *args) def executescript(self, sql: str) -> None: @@ -1504,8 +1518,8 @@ def simple_upsert_many_txn_emulated( self.engine.lock_table(txn, "user_ips") for keyv, valv in zip(key_values, value_values): - _keys = {x: y for x, y in zip(key_names, keyv)} - _vals = {x: y for x, y in zip(value_names, valv)} + _keys = dict(zip(key_names, keyv)) + _vals = dict(zip(value_names, valv)) self.simple_upsert_txn_emulated(txn, table, _keys, _vals, lock=False) diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 54a120d5ac43..9736730d5ead 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -622,14 +622,14 @@ def get_all_new_device_messages_txn( # We limit like this as we might have multiple rows per stream_id, and # we want to make sure we always get all entries for any stream_id # we return. - upper_pos = min(current_id, last_id + limit) + upto_token = min(current_id, last_id + limit) sql = ( "SELECT max(stream_id), user_id" " FROM device_inbox" " WHERE ? < stream_id AND stream_id <= ?" " GROUP BY user_id" ) - txn.execute(sql, (last_id, upper_pos)) + txn.execute(sql, (last_id, upto_token)) updates = [(row[0], row[1:]) for row in txn] sql = ( @@ -638,19 +638,13 @@ def get_all_new_device_messages_txn( " WHERE ? < stream_id AND stream_id <= ?" " GROUP BY destination" ) - txn.execute(sql, (last_id, upper_pos)) + txn.execute(sql, (last_id, upto_token)) updates.extend((row[0], row[1:]) for row in txn) # Order by ascending stream ordering updates.sort() - limited = False - upto_token = current_id - if len(updates) >= limit: - upto_token = updates[-1][0] - limited = True - - return updates, upto_token, limited + return updates, upto_token, upto_token < current_id return await self.db_pool.runInteraction( "get_all_new_device_messages", get_all_new_device_messages_txn diff --git a/synapse/storage/databases/main/directory.py b/synapse/storage/databases/main/directory.py index 44aa181174ac..3cb4c9072907 100644 --- a/synapse/storage/databases/main/directory.py +++ b/synapse/storage/databases/main/directory.py @@ -129,8 +129,6 @@ def alias_txn(txn: LoggingTransaction) -> None: 409, "Room alias %s already exists" % room_alias.to_string() ) - -class DirectoryStore(DirectoryWorkerStore): async def delete_room_alias(self, room_alias: RoomAlias) -> Optional[str]: room_id = await self.db_pool.runInteraction( "delete_room_alias", self._delete_room_alias_txn, room_alias @@ -201,3 +199,7 @@ def _update_aliases_for_room_txn(txn: LoggingTransaction) -> None: await self.db_pool.runInteraction( "_update_aliases_for_room_txn", _update_aliases_for_room_txn ) + + +class DirectoryStore(DirectoryWorkerStore): + pass diff --git a/synapse/storage/databases/main/e2e_room_keys.py b/synapse/storage/databases/main/e2e_room_keys.py index 9f8d2e4bea65..d01f28cc803f 100644 --- a/synapse/storage/databases/main/e2e_room_keys.py +++ b/synapse/storage/databases/main/e2e_room_keys.py @@ -13,17 +13,24 @@ # See the License for the specific language governing permissions and # limitations under the License. 
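Returning to the `database.py` hunk above: `execute` now takes its parameters as a single sequence, matching DBAPI2's `cursor.execute(sql, parameters)`, rather than forwarding `*args`. An illustrative call against the new signature (the query itself is invented for the example):

def count_users_named_after(txn: LoggingTransaction, name: str) -> int:
    # Parameters travel as one tuple; the default is an empty tuple for
    # queries with no placeholders.
    txn.execute(
        "SELECT COUNT(*) FROM users WHERE deactivated = ? AND name > ?",
        (1, name),
    )
    row = txn.fetchone()
    return row[0] if row else 0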
-from typing import Dict, Iterable, Mapping, Optional, Tuple, cast +from typing import TYPE_CHECKING, Dict, Iterable, Mapping, Optional, Tuple, cast from typing_extensions import Literal, TypedDict from synapse.api.errors import StoreError from synapse.logging.opentracing import log_kv, trace from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import LoggingTransaction +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.types import JsonDict, JsonSerializable, StreamKeyType from synapse.util import json_encoder +if TYPE_CHECKING: + from synapse.server import HomeServer + class RoomKey(TypedDict): """`KeyBackupData` in the Matrix spec. @@ -37,7 +44,82 @@ class RoomKey(TypedDict): session_data: JsonSerializable -class EndToEndRoomKeyStore(SQLBaseStore): +class EndToEndRoomKeyBackgroundStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_update_handler( + "delete_e2e_backup_keys_for_deactivated_users", + self._delete_e2e_backup_keys_for_deactivated_users, + ) + + def _delete_keys_txn(self, txn: LoggingTransaction, user_id: str) -> None: + self.db_pool.simple_delete_txn( + txn, + table="e2e_room_keys", + keyvalues={"user_id": user_id}, + ) + + self.db_pool.simple_delete_txn( + txn, + table="e2e_room_keys_versions", + keyvalues={"user_id": user_id}, + ) + + async def _delete_e2e_backup_keys_for_deactivated_users( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Retroactively purges account data for users that have already been deactivated. + Gets run as a background update caused by a schema delta. + """ + + last_user: str = progress.get("last_user", "") + + def _delete_backup_keys_for_deactivated_users_txn( + txn: LoggingTransaction, + ) -> int: + sql = """ + SELECT name FROM users + WHERE deactivated = ? and name > ? + ORDER BY name ASC + LIMIT ? + """ + + txn.execute(sql, (1, last_user, batch_size)) + users = [row[0] for row in txn] + + for user in users: + self._delete_keys_txn(txn, user) + + if users: + self.db_pool.updates._background_update_progress_txn( + txn, + "delete_e2e_backup_keys_for_deactivated_users", + {"last_user": users[-1]}, + ) + + return len(users) + + number_deleted = await self.db_pool.runInteraction( + "_delete_backup_keys_for_deactivated_users", + _delete_backup_keys_for_deactivated_users_txn, + ) + + if number_deleted < batch_size: + await self.db_pool.updates._end_background_update( + "delete_e2e_backup_keys_for_deactivated_users" + ) + + return number_deleted + + +class EndToEndRoomKeyStore(EndToEndRoomKeyBackgroundStore): """The store for end to end room key backups. See https://spec.matrix.org/v1.1/client-server-api/#server-side-key-backups @@ -550,3 +632,29 @@ def _delete_e2e_room_keys_version_txn(txn: LoggingTransaction) -> None: await self.db_pool.runInteraction( "delete_e2e_room_keys_version", _delete_e2e_room_keys_version_txn ) + + async def bulk_delete_backup_keys_and_versions_for_user(self, user_id: str) -> None: + """ + Bulk deletes all backup room keys and versions for a given user. 
+
+        Args:
+            user_id: the user whose backup keys and versions we're deleting
+        """
+
+        def _delete_all_e2e_room_keys_and_versions_txn(txn: LoggingTransaction) -> None:
+            self.db_pool.simple_delete_txn(
+                txn,
+                table="e2e_room_keys",
+                keyvalues={"user_id": user_id},
+            )
+
+            self.db_pool.simple_delete_txn(
+                txn,
+                table="e2e_room_keys_versions",
+                keyvalues={"user_id": user_id},
+            )
+
+        await self.db_pool.runInteraction(
+            "delete_all_e2e_room_keys_and_versions",
+            _delete_all_e2e_room_keys_and_versions_txn,
+        )
diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py
index a3b6c8ae8e82..dc7768c50cab 100644
--- a/synapse/storage/databases/main/end_to_end_keys.py
+++ b/synapse/storage/databases/main/end_to_end_keys.py
@@ -51,7 +51,7 @@
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import StreamIdGenerator
from synapse.types import JsonDict
-from synapse.util import json_encoder
+from synapse.util import json_decoder, json_encoder
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.cancellation import cancellable
from synapse.util.iterutils import batch_iter
@@ -1028,14 +1028,17 @@ def get_device_stream_token(self) -> int:
    async def claim_e2e_one_time_keys(
        self, query_list: Iterable[Tuple[str, str, str]]
-    ) -> Dict[str, Dict[str, Dict[str, str]]]:
+    ) -> Tuple[Dict[str, Dict[str, Dict[str, JsonDict]]], List[Tuple[str, str, str]]]:
        """Take a list of one time keys out of the database.

        Args:
            query_list: An iterable of tuples of (user ID, device ID, algorithm).

        Returns:
-            A map of user ID -> a map device ID -> a map of key ID -> JSON bytes.
+            A tuple of:
+                A map of user ID -> a map device ID -> a map of key ID -> JSON.
+
+                The entries from the input which could not be fulfilled.
        """

        @trace
@@ -1115,7 +1118,8 @@ def _claim_e2e_one_time_key_returning(
            key_id, key_json = otk_row
            return f"{algorithm}:{key_id}", key_json

-        results: Dict[str, Dict[str, Dict[str, str]]] = {}
+        results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {}
+        missing: List[Tuple[str, str, str]] = []
        for user_id, device_id, algorithm in query_list:
            if self.database_engine.supports_returning:
                # If we support RETURNING clause we can use a single query that
@@ -1138,11 +1142,25 @@ def _claim_e2e_one_time_key_returning(
                device_results = results.setdefault(user_id, {}).setdefault(
                    device_id, {}
                )
-                device_results[claim_row[0]] = claim_row[1]
-                continue
+                device_results[claim_row[0]] = json_decoder.decode(claim_row[1])
+            else:
+                missing.append((user_id, device_id, algorithm))
+
+        return results, missing
+
+    async def claim_e2e_fallback_keys(
+        self, query_list: Iterable[Tuple[str, str, str]]
+    ) -> Dict[str, Dict[str, Dict[str, JsonDict]]]:
+        """Take a list of fallback keys out of the database.

-            # No one-time key available, so see if there's a fallback
-            # key
+        Args:
+            query_list: An iterable of tuples of (user ID, device ID, algorithm).
+
+        Returns:
+            A map of user ID -> a map device ID -> a map of key ID -> JSON.
+ """ + results: Dict[str, Dict[str, Dict[str, JsonDict]]] = {} + for user_id, device_id, algorithm in query_list: row = await self.db_pool.simple_select_one( table="e2e_fallback_keys_json", keyvalues={ @@ -1179,7 +1197,7 @@ def _claim_e2e_one_time_key_returning( ) device_results = results.setdefault(user_id, {}).setdefault(device_id, {}) - device_results[f"{algorithm}:{key_id}"] = key_json + device_results[f"{algorithm}:{key_id}"] = json_decoder.decode(key_json) return results diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index ff3edeb7160b..ac19de183cb6 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -114,6 +114,10 @@ def __init__(self, room_id: str): class EventFederationWorkerStore(SignatureWorkerStore, EventsWorkerStore, SQLBaseStore): + # TODO: this attribute comes from EventPushActionWorkerStore. Should we inherit from + # that store so that mypy can deduce this for itself? + stream_ordering_month_ago: Optional[int] + def __init__( self, database: DatabasePool, @@ -1171,6 +1175,38 @@ def _get_min_depth_interaction( return int(min_depth) if min_depth is not None else None + async def have_room_forward_extremities_changed_since( + self, + room_id: str, + stream_ordering: int, + ) -> bool: + """Check if the forward extremities in a room have changed since the + given stream ordering + + Throws a StoreError if we have since purged the index for + stream_orderings from that point. + """ + assert self.stream_ordering_month_ago is not None + if stream_ordering <= self.stream_ordering_month_ago: + raise StoreError(400, f"stream_ordering too old {stream_ordering}") + + sql = """ + SELECT 1 FROM stream_ordering_to_exterm + WHERE stream_ordering > ? AND room_id = ? + LIMIT 1 + """ + + def have_room_forward_extremities_changed_since_txn( + txn: LoggingTransaction, + ) -> bool: + txn.execute(sql, (stream_ordering, room_id)) + return txn.fetchone() is not None + + return await self.db_pool.runInteraction( + "have_room_forward_extremities_changed_since", + have_room_forward_extremities_changed_since_txn, + ) + @cancellable async def get_forward_extremities_for_room_at_stream_ordering( self, room_id: str, stream_ordering: int @@ -1199,7 +1235,8 @@ async def get_forward_extremities_for_room_at_stream_ordering( # provided the last_change is recent enough, we now clamp the requested # stream_ordering to it. - if last_change > self.stream_ordering_month_ago: # type: ignore[attr-defined] + assert self.stream_ordering_month_ago is not None + if last_change > self.stream_ordering_month_ago: stream_ordering = min(last_change, stream_ordering) return await self._get_forward_extremeties_for_room(room_id, stream_ordering) @@ -1214,8 +1251,8 @@ async def _get_forward_extremeties_for_room( Throws a StoreError if we have since purged the index for stream_orderings from that point. 
""" - - if stream_ordering <= self.stream_ordering_month_ago: # type: ignore[attr-defined] + assert self.stream_ordering_month_ago is not None + if stream_ordering <= self.stream_ordering_month_ago: raise StoreError(400, "stream_ordering too old %s" % (stream_ordering,)) sql = """ @@ -1232,10 +1269,17 @@ def get_forward_extremeties_for_room_txn(txn: LoggingTransaction) -> List[str]: txn.execute(sql, (stream_ordering, room_id)) return [event_id for event_id, in txn] - return await self.db_pool.runInteraction( + event_ids = await self.db_pool.runInteraction( "get_forward_extremeties_for_room", get_forward_extremeties_for_room_txn ) + # If we didn't find any IDs, then we must have cleared out the + # associated `stream_ordering_to_exterm`. + if not event_ids: + raise StoreError(400, "stream_ordering too old %s" % (stream_ordering,)) + + return event_ids + def _get_connected_batch_event_backfill_results_txn( self, txn: LoggingTransaction, insertion_event_id: str, limit: int ) -> List[BackfillQueueNavigationItem]: @@ -1544,7 +1588,7 @@ async def get_event_ids_to_not_pull_from_backoff( self, room_id: str, event_ids: Collection[str], - ) -> List[str]: + ) -> Dict[str, int]: """ Filter down the events to ones that we've failed to pull before recently. Uses exponential backoff. @@ -1554,7 +1598,8 @@ async def get_event_ids_to_not_pull_from_backoff( event_ids: A list of events to filter down Returns: - List of event_ids that should not be attempted to be pulled + A dictionary of event_ids that should not be attempted to be pulled and the + next timestamp at which we may try pulling them again. """ event_failed_pull_attempts = await self.db_pool.simple_select_many_batch( table="event_failed_pull_attempts", @@ -1570,22 +1615,28 @@ async def get_event_ids_to_not_pull_from_backoff( ) current_time = self._clock.time_msec() - return [ - event_failed_pull_attempt["event_id"] - for event_failed_pull_attempt in event_failed_pull_attempts + + event_ids_with_backoff = {} + for event_failed_pull_attempt in event_failed_pull_attempts: + event_id = event_failed_pull_attempt["event_id"] # Exponential back-off (up to the upper bound) so we don't try to # pull the same event over and over. ex. 2hr, 4hr, 8hr, 16hr, etc. - if current_time - < event_failed_pull_attempt["last_attempt_ts"] - + ( - 2 - ** min( - event_failed_pull_attempt["num_attempts"], - BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS, + backoff_end_time = ( + event_failed_pull_attempt["last_attempt_ts"] + + ( + 2 + ** min( + event_failed_pull_attempt["num_attempts"], + BACKFILL_EVENT_EXPONENTIAL_BACKOFF_MAXIMUM_DOUBLING_STEPS, + ) ) + * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS ) - * BACKFILL_EVENT_EXPONENTIAL_BACKOFF_STEP_MILLISECONDS - ] + + if current_time < backoff_end_time: # `backoff_end_time` is exclusive + event_ids_with_backoff[event_id] = backoff_end_time + + return event_ids_with_backoff async def get_missing_events( self, @@ -1657,20 +1708,11 @@ async def get_successor_events(self, event_id: str) -> List[str]: @wrap_as_background_process("delete_old_forward_extrem_cache") async def _delete_old_forward_extrem_cache(self) -> None: def _delete_old_forward_extrem_cache_txn(txn: LoggingTransaction) -> None: - # Delete entries older than a month, while making sure we don't delete - # the only entries for a room. sql = """ DELETE FROM stream_ordering_to_exterm - WHERE - room_id IN ( - SELECT room_id - FROM stream_ordering_to_exterm - WHERE stream_ordering > ? - ) AND stream_ordering < ? + WHERE stream_ordering < ? 
""" - txn.execute( - sql, (self.stream_ordering_month_ago, self.stream_ordering_month_ago) # type: ignore[attr-defined] - ) + txn.execute(sql, (self.stream_ordering_month_ago,)) await self.db_pool.runInteraction( "_delete_old_forward_extrem_cache", diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index fc9d1727c4ac..f652f54cb6d9 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -27,6 +27,7 @@ Optional, Set, Tuple, + cast, ) import attr @@ -1340,9 +1341,7 @@ def _update_outliers_txn( [event.event_id for event, _ in events_and_contexts], ) - have_persisted: Dict[str, bool] = { - event_id: outlier for event_id, outlier in txn - } + have_persisted = dict(cast(Iterable[Tuple[str, bool]], txn)) logger.debug( "_update_outliers_txn: events=%s have_persisted=%s", diff --git a/synapse/storage/databases/main/keys.py b/synapse/storage/databases/main/keys.py index 0a19f607bda1..89c37a4eb560 100644 --- a/synapse/storage/databases/main/keys.py +++ b/synapse/storage/databases/main/keys.py @@ -15,7 +15,7 @@ import itertools import logging -from typing import Any, Dict, Iterable, List, Optional, Tuple +from typing import Any, Dict, Iterable, List, Mapping, Optional, Tuple from signedjson.key import decode_verify_key_bytes @@ -95,7 +95,7 @@ async def store_server_verify_keys( self, from_server: str, ts_added_ms: int, - verify_keys: Iterable[Tuple[str, str, FetchKeyResult]], + verify_keys: Mapping[Tuple[str, str], FetchKeyResult], ) -> None: """Stores NACL verification keys for remote servers. Args: @@ -108,7 +108,7 @@ async def store_server_verify_keys( key_values = [] value_values = [] invalidations = [] - for server_name, key_id, fetch_result in verify_keys: + for (server_name, key_id), fetch_result in verify_keys.items(): key_values.append((server_name, key_id)) value_values.append( ( diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index 9a24f7a65526..87e28e22d3c5 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -509,19 +509,24 @@ def __init__( async def _set_device_id_for_pushers( self, progress: JsonDict, batch_size: int ) -> int: - """Background update to populate the device_id column of the pushers table.""" + """ + Background update to populate the device_id column and clear the access_token + column for the pushers table. + """ last_pusher_id = progress.get("pusher_id", 0) def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int: txn.execute( """ - SELECT p.id, at.device_id + SELECT + p.id AS pusher_id, + p.device_id AS pusher_device_id, + at.device_id AS token_device_id FROM pushers AS p - INNER JOIN access_tokens AS at + LEFT JOIN access_tokens AS at ON p.access_token = at.id WHERE p.access_token IS NOT NULL - AND at.device_id IS NOT NULL AND p.id > ? ORDER BY p.id LIMIT ? @@ -533,17 +538,31 @@ def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int: if len(rows) == 0: return 0 + # The reason we're clearing the access_token column here is a bit subtle. + # When a user logs out, we: + # (1) delete the access token + # (2) delete the device + # + # Ideally, we would delete the pushers only via its link to the device + # during (2), but since this background update might not have fully run yet, + # we're still deleting the pushers via the access token during (1). 
self.db_pool.simple_update_many_txn( txn=txn, table="pushers", key_names=("id",), - key_values=[(row["id"],) for row in rows], - value_names=("device_id",), - value_values=[(row["device_id"],) for row in rows], + key_values=[(row["pusher_id"],) for row in rows], + value_names=("device_id", "access_token"), + # If there was already a device_id on the pusher, we only want to clear + # the access_token column, so we keep the existing device_id. Otherwise, + # we set the device_id we got from joining the access_tokens table. + value_values=[ + (row["pusher_device_id"] or row["token_device_id"], None) + for row in rows + ], ) self.db_pool.updates._background_update_progress_txn( - txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["id"]} + txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["pusher_id"]} ) return len(rows) @@ -568,7 +587,6 @@ class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore): async def add_pusher( self, user_id: str, - access_token: Optional[int], kind: str, app_id: str, app_display_name: str, @@ -581,13 +599,13 @@ async def add_pusher( profile_tag: str = "", enabled: bool = True, device_id: Optional[str] = None, + access_token_id: Optional[int] = None, ) -> None: async with self._pushers_id_gen.get_next() as stream_id: await self.db_pool.simple_upsert( table="pushers", keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, values={ - "access_token": access_token, "kind": kind, "app_display_name": app_display_name, "device_display_name": device_display_name, @@ -599,6 +617,10 @@ async def add_pusher( "id": stream_id, "enabled": enabled, "device_id": device_id, + # XXX(quenting): We're only really persisting the access token ID + # when updating an existing pusher. This is in case the + # 'set_device_id_for_pushers' background update hasn't finished yet. + "access_token": access_token_id, }, desc="add_pusher", ) diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index bc3a83919c34..3955a8a9a589 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -472,12 +472,11 @@ async def get_applicable_edits( the event will map to None. """ - # We only allow edits for `m.room.message` events that have the same sender - # and event type. We can't assert these things during regular event auth so - # we have to do the checks post hoc. + # We only allow edits for events that have the same sender and event type. + # We can't assert these things during regular event auth so we have to do + # the checks post hoc. - # Fetches latest edit that has the same type and sender as the - # original, and is an `m.room.message`. + # Fetches latest edit that has the same type and sender as the original. if isinstance(self.database_engine, PostgresEngine): # The `DISTINCT ON` clause will pick the *first* row it encounters, # so ordering by origin server ts + event ID desc will ensure we get @@ -493,7 +492,6 @@ async def get_applicable_edits( WHERE %s AND relation_type = ? - AND edit.type = 'm.room.message' ORDER by original.event_id DESC, edit.origin_server_ts DESC, edit.event_id DESC """ else: @@ -512,7 +510,6 @@ async def get_applicable_edits( WHERE %s AND relation_type = ? 
-                AND edit.type = 'm.room.message'
             ORDER by edit.origin_server_ts, edit.event_id
             """
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py
index 3825bd60797c..dd7dbb6901c8 100644
--- a/synapse/storage/databases/main/room.py
+++ b/synapse/storage/databases/main/room.py
@@ -1998,6 +1998,9 @@ def _background_populate_rooms_creator_column_txn(
             for room_id, event_json in room_id_to_create_event_results:
                 event_dict = db_to_json(event_json)

+                # The creator property might not exist in newer room versions, but
+                # for those versions the creator column should have been properly
+                # populated during room creation.
                 creator = event_dict.get("content").get(EventContentFields.ROOM_CREATOR)

                 self.db_pool.simple_update_txn(
@@ -2132,12 +2135,16 @@ async def upsert_room_on_join(
             # invalid, and it would fail auth checks anyway.
             raise StoreError(400, "No create event in state")

-        room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)
+        # Before MSC2175, the room creator was a separate field.
+        if not room_version.msc2175_implicit_room_creator:
+            room_creator = create_event.content.get(EventContentFields.ROOM_CREATOR)

-        if not isinstance(room_creator, str):
-            # If the create event does not have a creator then the room is
-            # invalid, and it would fail auth checks anyway.
-            raise StoreError(400, "No creator defined on the create event")
+            if not isinstance(room_creator, str):
+                # If the create event does not have a creator then the room is
+                # invalid, and it would fail auth checks anyway.
+                raise StoreError(400, "No creator defined on the create event")
+        else:
+            room_creator = create_event.sender

         await self.db_pool.simple_upsert(
             desc="upsert_room_on_join",
diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py
index 1a7d5dd4335a..937028061c40 100644
--- a/synapse/storage/databases/main/roommember.py
+++ b/synapse/storage/databases/main/roommember.py
@@ -442,7 +442,11 @@ async def get_rooms_for_local_user_where_membership_is(
         )

         # Now we filter out forgotten and excluded rooms
-        rooms_to_exclude = await self.get_forgotten_rooms_for_user(user_id)
+        rooms_to_exclude: AbstractSet[str] = set()
+
+        # Users can't forget joined/invited rooms, so we skip the check for such lookups.
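+        # (Per the spec, a room can only be forgotten after the user has left it,
+        # so a forgotten room can never turn up in a join/invite query.)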
+ if any(m not in (Membership.JOIN, Membership.INVITE) for m in membership_list): + rooms_to_exclude = await self.get_forgotten_rooms_for_user(user_id) if excluded_rooms is not None: # Take a copy to avoid mutating the in-cache set @@ -1414,6 +1418,12 @@ def __init__( columns=["user_id", "room_id"], where_clause="forgotten = 1", ) + self.db_pool.updates.register_background_index_update( + "room_membership_user_room_index", + index_name="room_membership_user_room_idx", + table="room_memberships", + columns=["user_id", "room_id"], + ) async def _background_add_membership_profile( self, progress: JsonDict, batch_size: int diff --git a/synapse/storage/databases/main/stats.py b/synapse/storage/databases/main/stats.py index d3393d8e493e..97c4dc2603d6 100644 --- a/synapse/storage/databases/main/stats.py +++ b/synapse/storage/databases/main/stats.py @@ -16,7 +16,17 @@ import logging from enum import Enum from itertools import chain -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union, cast +from typing import ( + TYPE_CHECKING, + Any, + Dict, + Iterable, + List, + Optional, + Tuple, + Union, + cast, +) from typing_extensions import Counter @@ -523,7 +533,7 @@ def _fetch_current_state_stats( """, (room_id,), ) - membership_counts = {membership: cnt for membership, cnt in txn} + membership_counts = dict(cast(Iterable[Tuple[str, int]], txn)) txn.execute( """ diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 54715ca019d2..5df72afb5991 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -41,6 +41,7 @@ Any, Collection, Dict, + Iterable, List, Optional, Set, @@ -50,7 +51,7 @@ ) import attr -from frozendict import frozendict +from immutabledict import immutabledict from typing_extensions import Literal from twisted.internet import defer @@ -557,7 +558,7 @@ def get_room_max_token(self) -> RoomStreamToken: if p > min_pos } - return RoomStreamToken(None, min_pos, frozendict(positions)) + return RoomStreamToken(None, min_pos, immutabledict(positions)) async def get_room_events_stream_for_rooms( self, @@ -1343,7 +1344,9 @@ def _reset_federation_positions_txn(self, txn: LoggingTransaction) -> None: GROUP BY type """ txn.execute(sql) - min_positions = {typ: pos for typ, pos in txn} # Map from type -> min position + min_positions = dict( + cast(Iterable[Tuple[str, int]], txn) + ) # Map from type -> min position # Ensure we do actually have some values here assert set(min_positions) == {"federation", "events"} diff --git a/synapse/storage/databases/main/user_directory.py b/synapse/storage/databases/main/user_directory.py index 97f09b73dde6..5d65faed164a 100644 --- a/synapse/storage/databases/main/user_directory.py +++ b/synapse/storage/databases/main/user_directory.py @@ -102,44 +102,34 @@ async def _populate_user_directory_createtables( ) -> int: # Get all the rooms that we want to process. 
def _make_staging_area(txn: LoggingTransaction) -> None: - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_rooms(room_id TEXT NOT NULL, events BIGINT NOT NULL)" - ) - txn.execute(sql) - - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_position(position TEXT NOT NULL)" - ) - txn.execute(sql) - - # Get rooms we want to process from the database - sql = """ - SELECT room_id, count(*) FROM current_state_events + sql = f""" + CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_rooms AS + SELECT room_id, count(*) AS events + FROM current_state_events GROUP BY room_id """ txn.execute(sql) - rooms = list(txn.fetchall()) - self.db_pool.simple_insert_many_txn( - txn, TEMP_TABLE + "_rooms", keys=("room_id", "events"), values=rooms + txn.execute( + f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_rooms_rm ON {TEMP_TABLE}_rooms (room_id)" ) - del rooms - - sql = ( - "CREATE TABLE IF NOT EXISTS " - + TEMP_TABLE - + "_users(user_id TEXT NOT NULL)" + txn.execute( + f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_rooms_evs ON {TEMP_TABLE}_rooms (events)" ) - txn.execute(sql) - txn.execute("SELECT name FROM users") - users = list(txn.fetchall()) + sql = f""" + CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_position ( + position TEXT NOT NULL + ) + """ + txn.execute(sql) - self.db_pool.simple_insert_many_txn( - txn, TEMP_TABLE + "_users", keys=("user_id",), values=users + sql = f""" + CREATE TABLE IF NOT EXISTS {TEMP_TABLE}_users AS + SELECT name AS user_id FROM users + """ + txn.execute(sql) + txn.execute( + f"CREATE INDEX IF NOT EXISTS {TEMP_TABLE}_users_idx ON {TEMP_TABLE}_users (user_id)" ) new_pos = await self.get_max_stream_id_in_current_state_deltas() @@ -222,12 +212,13 @@ def _get_next_batch( if not rooms_to_work_on: return None - # Get how many are left to process, so we can give status on how - # far we are in processing - txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") - result = txn.fetchone() - assert result is not None - progress["remaining"] = result[0] + if "remaining" not in progress: + # Get how many are left to process, so we can give status on how + # far we are in processing + txn.execute("SELECT COUNT(*) FROM " + TEMP_TABLE + "_rooms") + result = txn.fetchone() + assert result is not None + progress["remaining"] = result[0] return rooms_to_work_on @@ -332,7 +323,14 @@ def _get_next_batch( if processed_event_count > batch_size: # Don't process any more rooms, we've hit our batch size. 
- return processed_event_count + break + + await self.db_pool.runInteraction( + "populate_user_directory", + self.db_pool.updates._background_update_progress_txn, + "populate_user_directory_process_rooms", + progress, + ) return processed_event_count @@ -356,13 +354,14 @@ def _get_next_batch(txn: LoggingTransaction) -> Optional[List[str]]: users_to_work_on = [x[0] for x in user_result] - # Get how many are left to process, so we can give status on how - # far we are in processing - sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users" - txn.execute(sql) - count_result = txn.fetchone() - assert count_result is not None - progress["remaining"] = count_result[0] + if "remaining" not in progress: + # Get how many are left to process, so we can give status on how + # far we are in processing + sql = "SELECT COUNT(*) FROM " + TEMP_TABLE + "_users" + txn.execute(sql) + count_result = txn.fetchone() + assert count_result is not None + progress["remaining"] = count_result[0] return users_to_work_on @@ -698,10 +697,17 @@ async def delete_all_from_user_dir(self) -> None: """Delete the entire user directory""" def _delete_all_from_user_dir_txn(txn: LoggingTransaction) -> None: - txn.execute("DELETE FROM user_directory") - txn.execute("DELETE FROM user_directory_search") - txn.execute("DELETE FROM users_in_public_rooms") - txn.execute("DELETE FROM users_who_share_private_rooms") + # SQLite doesn't support TRUNCATE. + # On Postgres, DELETE FROM does a table scan but TRUNCATE is more efficient. + truncate = ( + "DELETE FROM" + if isinstance(self.database_engine, Sqlite3Engine) + else "TRUNCATE" + ) + txn.execute(f"{truncate} user_directory") + txn.execute(f"{truncate} user_directory_search") + txn.execute(f"{truncate} users_in_public_rooms") + txn.execute(f"{truncate} users_who_share_private_rooms") txn.call_after(self.get_user_in_directory.invalidate_all) await self.db_pool.runInteraction( diff --git a/synapse/storage/engines/sqlite.py b/synapse/storage/engines/sqlite.py index 28751e89a5a5..ca8c59297c42 100644 --- a/synapse/storage/engines/sqlite.py +++ b/synapse/storage/engines/sqlite.py @@ -34,6 +34,13 @@ def __init__(self, database_config: Mapping[str, Any]): ":memory:", ) + # A connection to a database that has already been prepared, to use as a + # base for an in-memory connection. This is used during unit tests to + # speed up setting up the DB. + self._prepped_conn: Optional[sqlite3.Connection] = database_config.get( + "_TEST_PREPPED_CONN" + ) + if platform.python_implementation() == "PyPy": # pypy's sqlite3 module doesn't handle bytearrays, convert them # back to bytes. @@ -84,7 +91,15 @@ def on_new_connection(self, db_conn: "LoggingDatabaseConnection") -> None: # In memory databases need to be rebuilt each time. Ideally we'd # reuse the same connection as we do when starting up, but that # would involve using adbapi before we have started the reactor. - prepare_database(db_conn, self, config=None) + # + # If we have a `prepped_conn` we can use that to initialise the DB, + # otherwise we need to call `prepare_database`. + if self._prepped_conn is not None: + # Initialise the new DB from the pre-prepared DB. 
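+            # (sqlite3.Connection.backup copies the already-prepared database
+            # into this fresh in-memory one, which is much cheaper than
+            # replaying every schema delta.)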
+ assert isinstance(db_conn.conn, sqlite3.Connection) + self._prepped_conn.backup(db_conn.conn) + else: + prepare_database(db_conn, self, config=None) db_conn.create_function("rank", 1, _rank) db_conn.execute("PRAGMA foreign_keys = ON;") diff --git a/synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql b/synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql new file mode 100644 index 000000000000..1367fb626733 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/02_set_device_id_for_pushers_bg_update.sql @@ -0,0 +1,19 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Triggers the background update to set the device_id for pushers +-- that don't have one, and clear the access_token column. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7402, 'set_device_id_for_pushers', '{}'); diff --git a/synapse/storage/schema/main/delta/74/03_room_membership_index.sql b/synapse/storage/schema/main/delta/74/03_room_membership_index.sql new file mode 100644 index 000000000000..81a7d9ff9cad --- /dev/null +++ b/synapse/storage/schema/main/delta/74/03_room_membership_index.sql @@ -0,0 +1,19 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add an index to `room_membership(user_id, room_id)` to make querying for +-- forgotten rooms faster. +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7403, 'room_membership_user_room_index', '{}'); diff --git a/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql b/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql new file mode 100644 index 000000000000..a194f4cecef3 --- /dev/null +++ b/synapse/storage/schema/main/delta/74/04_delete_e2e_backup_keys_for_deactivated_users.sql @@ -0,0 +1,17 @@ +/* Copyright 2023 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (7404, 'delete_e2e_backup_keys_for_deactivated_users', '{}'); \ No newline at end of file diff --git a/synapse/storage/types.py b/synapse/storage/types.py index 56a00485393d..34ac80753012 100644 --- a/synapse/storage/types.py +++ b/synapse/storage/types.py @@ -31,14 +31,14 @@ Some very basic protocol definitions for the DB-API2 classes specified in PEP-249 """ -_Parameters = Union[Sequence[Any], Mapping[str, Any]] +SQLQueryParameters = Union[Sequence[Any], Mapping[str, Any]] class Cursor(Protocol): - def execute(self, sql: str, parameters: _Parameters = ...) -> Any: + def execute(self, sql: str, parameters: SQLQueryParameters = ...) -> Any: ... - def executemany(self, sql: str, parameters: Sequence[_Parameters]) -> Any: + def executemany(self, sql: str, parameters: Sequence[SQLQueryParameters]) -> Any: ... def fetchone(self) -> Optional[Tuple]: diff --git a/synapse/types/__init__.py b/synapse/types/__init__.py index 33363867c456..5cee9c3194f0 100644 --- a/synapse/types/__init__.py +++ b/synapse/types/__init__.py @@ -35,7 +35,7 @@ ) import attr -from frozendict import frozendict +from immutabledict import immutabledict from signedjson.key import decode_verify_key_bytes from signedjson.types import VerifyKey from typing_extensions import Final, TypedDict @@ -50,6 +50,7 @@ IReactorTCP, IReactorThreads, IReactorTime, + IReactorUNIX, ) from synapse.api.errors import Codes, SynapseError @@ -91,6 +92,7 @@ class ISynapseReactor( IReactorTCP, IReactorSSL, + IReactorUNIX, IReactorPluggableNameResolver, IReactorTime, IReactorCore, @@ -490,12 +492,12 @@ class RoomStreamToken: ) stream: int = attr.ib(validator=attr.validators.instance_of(int)) - instance_map: "frozendict[str, int]" = attr.ib( - factory=frozendict, + instance_map: "immutabledict[str, int]" = attr.ib( + factory=immutabledict, validator=attr.validators.deep_mapping( key_validator=attr.validators.instance_of(str), value_validator=attr.validators.instance_of(int), - mapping_validator=attr.validators.instance_of(frozendict), + mapping_validator=attr.validators.instance_of(immutabledict), ), ) @@ -531,7 +533,7 @@ async def parse(cls, store: "PurgeEventsStore", string: str) -> "RoomStreamToken return cls( topological=None, stream=stream, - instance_map=frozendict(instance_map), + instance_map=immutabledict(instance_map), ) except CancelledError: raise @@ -566,7 +568,7 @@ def copy_and_advance(self, other: "RoomStreamToken") -> "RoomStreamToken": for instance in set(self.instance_map).union(other.instance_map) } - return RoomStreamToken(None, max_stream, frozendict(instance_map)) + return RoomStreamToken(None, max_stream, immutabledict(instance_map)) def as_historical_tuple(self) -> Tuple[int, int]: """Returns a tuple of `(topological, stream)` for historical tokens. diff --git a/synapse/types/state.py b/synapse/types/state.py index 4b3071acce32..1e78a740475b 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -28,7 +28,7 @@ ) import attr -from frozendict import frozendict +from immutabledict import immutabledict from synapse.api.constants import EventTypes from synapse.types import MutableStateMap, StateKey, StateMap @@ -56,7 +56,7 @@ class StateFilter: appear in `types`. 
""" - types: "frozendict[str, Optional[FrozenSet[str]]]" + types: "immutabledict[str, Optional[FrozenSet[str]]]" include_others: bool = False def __attrs_post_init__(self) -> None: @@ -67,7 +67,7 @@ def __attrs_post_init__(self) -> None: object.__setattr__( self, "types", - frozendict({k: v for k, v in self.types.items() if v is not None}), + immutabledict({k: v for k, v in self.types.items() if v is not None}), ) @staticmethod @@ -112,7 +112,7 @@ def from_types(types: Iterable[Tuple[str, Optional[str]]]) -> "StateFilter": type_dict.setdefault(typ, set()).add(s) # type: ignore return StateFilter( - types=frozendict( + types=immutabledict( (k, frozenset(v) if v is not None else None) for k, v in type_dict.items() ) @@ -139,7 +139,7 @@ def from_lazy_load_member_list(members: Iterable[str]) -> "StateFilter": The new state filter """ return StateFilter( - types=frozendict({EventTypes.Member: frozenset(members)}), + types=immutabledict({EventTypes.Member: frozenset(members)}), include_others=True, ) @@ -159,7 +159,7 @@ def freeze( types_with_frozen_values[state_types] = None return StateFilter( - frozendict(types_with_frozen_values), include_others=include_others + immutabledict(types_with_frozen_values), include_others=include_others ) def return_expanded(self) -> "StateFilter": @@ -217,7 +217,7 @@ def return_expanded(self) -> "StateFilter": # We want to return all non-members, but only particular # memberships return StateFilter( - types=frozendict({EventTypes.Member: self.types[EventTypes.Member]}), + types=immutabledict({EventTypes.Member: self.types[EventTypes.Member]}), include_others=True, ) else: @@ -381,14 +381,16 @@ def get_member_split(self) -> Tuple["StateFilter", "StateFilter"]: if state_keys is None: member_filter = StateFilter.all() else: - member_filter = StateFilter(frozendict({EventTypes.Member: state_keys})) + member_filter = StateFilter( + immutabledict({EventTypes.Member: state_keys}) + ) elif self.include_others: member_filter = StateFilter.all() else: member_filter = StateFilter.none() non_member_filter = StateFilter( - types=frozendict( + types=immutabledict( {k: v for k, v in self.types.items() if k != EventTypes.Member} ), include_others=self.include_others, @@ -578,8 +580,8 @@ def must_await_full_state(self, is_mine_id: Callable[[str], bool]) -> bool: return False -_ALL_STATE_FILTER = StateFilter(types=frozendict(), include_others=True) +_ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True) _ALL_NON_MEMBER_STATE_FILTER = StateFilter( - types=frozendict({EventTypes.Member: frozenset()}), include_others=True + types=immutabledict({EventTypes.Member: frozenset()}), include_others=True ) -_NONE_STATE_FILTER = StateFilter(types=frozendict(), include_others=False) +_NONE_STATE_FILTER = StateFilter(types=immutabledict(), include_others=False) diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index 7be9d5f11335..9ddd26ccaa2d 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -18,7 +18,7 @@ from typing import Any, Callable, Dict, Generator, Optional, Sequence import attr -from frozendict import frozendict +from immutabledict import immutabledict from matrix_common.versionstring import get_distribution_version_string from typing_extensions import ParamSpec @@ -41,22 +41,18 @@ def _reject_invalid_json(val: Any) -> None: raise ValueError("Invalid JSON value: '%s'" % val) -def _handle_frozendict(obj: Any) -> Dict[Any, Any]: - """Helper for json_encoder. 
Makes frozendicts serializable by returning +def _handle_immutabledict(obj: Any) -> Dict[Any, Any]: + """Helper for json_encoder. Makes immutabledicts serializable by returning the underlying dict """ - if type(obj) is frozendict: + if type(obj) is immutabledict: # fishing the protected dict out of the object is a bit nasty, # but we don't really want the overhead of copying the dict. try: # Safety: we catch the AttributeError immediately below. - # See https://github.com/matrix-org/python-canonicaljson/issues/36#issuecomment-927816293 - # for discussion on how frozendict's internals have changed over time. - return obj._dict # type: ignore[attr-defined] + return obj._dict except AttributeError: - # When the C implementation of frozendict is used, - # there isn't a `_dict` attribute with a dict - # so we resort to making a copy of the frozendict + # If all else fails, resort to making a copy of the immutabledict return dict(obj) raise TypeError( "Object of type %s is not JSON serializable" % obj.__class__.__name__ @@ -64,11 +60,11 @@ def _handle_frozendict(obj: Any) -> Dict[Any, Any]: # A custom JSON encoder which: -# * handles frozendicts +# * handles immutabledicts # * produces valid JSON (no NaNs etc) # * reduces redundant whitespace json_encoder = json.JSONEncoder( - allow_nan=False, separators=(",", ":"), default=_handle_frozendict + allow_nan=False, separators=(",", ":"), default=_handle_immutabledict ) # Create a custom decoder to reject Python extensions to JSON. diff --git a/synapse/util/frozenutils.py b/synapse/util/frozenutils.py index 7223af1a36e7..889caa2601e3 100644 --- a/synapse/util/frozenutils.py +++ b/synapse/util/frozenutils.py @@ -14,14 +14,14 @@ import collections.abc from typing import Any -from frozendict import frozendict +from immutabledict import immutabledict def freeze(o: Any) -> Any: if isinstance(o, dict): - return frozendict({k: freeze(v) for k, v in o.items()}) + return immutabledict({k: freeze(v) for k, v in o.items()}) - if isinstance(o, frozendict): + if isinstance(o, immutabledict): return o if isinstance(o, (bytes, str)): diff --git a/tests/api/test_filtering.py b/tests/api/test_filtering.py index 0f45615160ed..6c6a9ab4b4a9 100644 --- a/tests/api/test_filtering.py +++ b/tests/api/test_filtering.py @@ -18,7 +18,6 @@ from unittest.mock import patch import jsonschema -from frozendict import frozendict from twisted.test.proto_helpers import MemoryReactor @@ -29,6 +28,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util import Clock +from synapse.util.frozenutils import freeze from tests import unittest from tests.events.test_utils import MockEvent @@ -343,12 +343,12 @@ def test_filter_labels(self) -> None: self.assertFalse(Filter(self.hs, definition)._check(event)) - # check it works with frozendicts too + # check it works with frozen dictionaries too event = MockEvent( sender="@foo:bar", type="m.room.message", room_id="!secretbase:unknown", - content=frozendict({EventContentFields.LABELS: ["#fun"]}), + content=freeze({EventContentFields.LABELS: ["#fun"]}), ) self.assertTrue(Filter(self.hs, definition)._check(event)) diff --git a/tests/appservice/test_api.py b/tests/appservice/test_api.py index 9d183b733ec2..7deb923a280d 100644 --- a/tests/appservice/test_api.py +++ b/tests/appservice/test_api.py @@ -16,6 +16,7 @@ from twisted.test.proto_helpers import MemoryReactor +from synapse.api.errors import HttpResponseException from synapse.appservice import ApplicationService from synapse.server import HomeServer from 
synapse.types import JsonDict @@ -64,8 +65,8 @@ def test_query_3pe_authenticates_token(self) -> None: } ] - URL_USER = f"{URL}/_matrix/app/unstable/thirdparty/user/{PROTOCOL}" - URL_LOCATION = f"{URL}/_matrix/app/unstable/thirdparty/location/{PROTOCOL}" + URL_USER = f"{URL}/_matrix/app/v1/thirdparty/user/{PROTOCOL}" + URL_LOCATION = f"{URL}/_matrix/app/v1/thirdparty/location/{PROTOCOL}" self.request_url = None @@ -105,3 +106,114 @@ async def get_json( ) self.assertEqual(self.request_url, URL_LOCATION) self.assertEqual(result, SUCCESS_RESULT_LOCATION) + + def test_fallback(self) -> None: + """ + Tests that the fallback to legacy URLs works. + """ + SUCCESS_RESULT_USER = [ + { + "protocol": PROTOCOL, + "userid": "@a:user", + "fields": { + "more": "fields", + }, + } + ] + + URL_USER = f"{URL}/_matrix/app/v1/thirdparty/user/{PROTOCOL}" + FALLBACK_URL_USER = f"{URL}/_matrix/app/unstable/thirdparty/user/{PROTOCOL}" + + self.request_url = None + self.v1_seen = False + + async def get_json( + url: str, + args: Mapping[Any, Any], + headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], + ) -> List[JsonDict]: + # Ensure the access token is passed as both a header and query arg. + if not headers.get("Authorization") or not args.get(b"access_token"): + raise RuntimeError("Access token not provided") + + self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) + self.assertEqual(args.get(b"access_token"), TOKEN) + self.request_url = url + if url == URL_USER: + self.v1_seen = True + raise HttpResponseException(404, "NOT_FOUND", b"NOT_FOUND") + elif url == FALLBACK_URL_USER: + return SUCCESS_RESULT_USER + else: + raise RuntimeError( + "URL provided was invalid. This should never be seen." + ) + + # We assign to a method, which mypy doesn't like. + self.api.get_json = Mock(side_effect=get_json) # type: ignore[assignment] + + result = self.get_success( + self.api.query_3pe(self.service, "user", PROTOCOL, {b"some": [b"field"]}) + ) + self.assertTrue(self.v1_seen) + self.assertEqual(self.request_url, FALLBACK_URL_USER) + self.assertEqual(result, SUCCESS_RESULT_USER) + + def test_claim_keys(self) -> None: + """ + Tests that the /keys/claim response is properly parsed for missing + keys. + """ + + RESPONSE: JsonDict = { + "@alice:example.org": { + "DEVICE_1": { + "signed_curve25519:AAAAHg": { + # We don't really care about the content of the keys, + # they get passed back transparently. + }, + "signed_curve25519:BBBBHg": {}, + }, + "DEVICE_2": {"signed_curve25519:CCCCHg": {}}, + }, + } + + async def post_json_get_json( + uri: str, + post_json: Any, + headers: Mapping[Union[str, bytes], Sequence[Union[str, bytes]]], + ) -> JsonDict: + # Ensure the access token is passed as both a header and query arg. + if not headers.get("Authorization"): + raise RuntimeError("Access token not provided") + + self.assertEqual(headers.get("Authorization"), [f"Bearer {TOKEN}"]) + return RESPONSE + + # We assign to a method, which mypy doesn't like. + self.api.post_json_get_json = Mock(side_effect=post_json_get_json) # type: ignore[assignment] + + MISSING_KEYS = [ + # Known user, known device, missing algorithm. + ("@alice:example.org", "DEVICE_1", "signed_curve25519:DDDDHg"), + # Known user, missing device. + ("@alice:example.org", "DEVICE_3", "signed_curve25519:EEEEHg"), + # Unknown user. 
+ ("@bob:example.org", "DEVICE_4", "signed_curve25519:FFFFHg"), + ] + + claimed_keys, missing = self.get_success( + self.api.claim_client_keys( + self.service, + [ + # Found devices + ("@alice:example.org", "DEVICE_1", "signed_curve25519:AAAAHg"), + ("@alice:example.org", "DEVICE_1", "signed_curve25519:BBBBHg"), + ("@alice:example.org", "DEVICE_2", "signed_curve25519:CCCCHg"), + ] + + MISSING_KEYS, + ) + ) + + self.assertEqual(claimed_keys, RESPONSE) + self.assertEqual(missing, MISSING_KEYS) diff --git a/tests/config/test_appservice.py b/tests/config/test_appservice.py new file mode 100644 index 000000000000..d2d1a40dfcb8 --- /dev/null +++ b/tests/config/test_appservice.py @@ -0,0 +1,40 @@ +# Copyright 2023 Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from synapse.config.appservice import AppServiceConfig, ConfigError + +from tests.unittest import TestCase + + +class AppServiceConfigTest(TestCase): + def test_invalid_app_service_config_files(self) -> None: + for invalid_value in [ + "foobar", + 1, + None, + True, + False, + {}, + ["foo", "bar", False], + ]: + with self.assertRaises(ConfigError): + AppServiceConfig().read_config( + {"app_service_config_files": invalid_value} + ) + + def test_valid_app_service_config_files(self) -> None: + AppServiceConfig().read_config({"app_service_config_files": []}) + AppServiceConfig().read_config( + {"app_service_config_files": ["/not/a/real/path", "/not/a/real/path/2"]} + ) diff --git a/tests/config/test_workers.py b/tests/config/test_workers.py index ef6294ecb2ce..49a6bdf40837 100644 --- a/tests/config/test_workers.py +++ b/tests/config/test_workers.py @@ -14,14 +14,14 @@ from typing import Any, Mapping, Optional from unittest.mock import Mock -from frozendict import frozendict +from immutabledict import immutabledict from synapse.config import ConfigError from synapse.config.workers import WorkerConfig from tests.unittest import TestCase -_EMPTY_FROZENDICT: Mapping[str, Any] = frozendict() +_EMPTY_IMMUTABLEDICT: Mapping[str, Any] = immutabledict() class WorkerDutyConfigTestCase(TestCase): @@ -29,7 +29,7 @@ def _make_worker_config( self, worker_app: str, worker_name: Optional[str], - extras: Mapping[str, Any] = _EMPTY_FROZENDICT, + extras: Mapping[str, Any] = _EMPTY_IMMUTABLEDICT, ) -> WorkerConfig: root_config = Mock() root_config.worker_app = worker_app diff --git a/tests/crypto/test_keyring.py b/tests/crypto/test_keyring.py index 1b9696748fdc..66102ab93487 100644 --- a/tests/crypto/test_keyring.py +++ b/tests/crypto/test_keyring.py @@ -193,7 +193,7 @@ def test_verify_json_for_server(self) -> None: r = self.hs.get_datastores().main.store_server_verify_keys( "server9", int(time.time() * 1000), - [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))], + {("server9", get_key_id(key1)): FetchKeyResult(get_verify_key(key1), 1000)}, ) self.get_success(r) @@ -291,7 +291,7 @@ def test_verify_json_for_server_with_null_valid_until_ms(self) -> None: # None is not a valid value in FetchKeyResult, but we're 
abusing this # API to insert null values into the database. The nulls get converted # to 0 when fetched in KeyStore.get_server_verify_keys. - [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))], # type: ignore[arg-type] + {("server9", get_key_id(key1)): FetchKeyResult(get_verify_key(key1), None)}, # type: ignore[arg-type] ) self.get_success(r) diff --git a/tests/events/test_utils.py b/tests/events/test_utils.py index 4174a237ec84..1b179acb20fc 100644 --- a/tests/events/test_utils.py +++ b/tests/events/test_utils.py @@ -143,6 +143,13 @@ def test_basic_keys(self) -> None: room_version=RoomVersions.MSC2176, ) + # As of MSC3989 we now redact the origin key. + self.run_test( + {"type": "A", "origin": "example.com"}, + {"type": "A", "content": {}, "signatures": {}, "unsigned": {}}, + room_version=RoomVersions.MSC3989, + ) + def test_unsigned(self) -> None: """Ensure that unsigned properties get stripped (except age_ts and replaces_state).""" self.run_test( @@ -311,7 +318,11 @@ def test_redacts(self) -> None: """Redaction events have no special behaviour until MSC2174/MSC2176.""" self.run_test( - {"type": "m.room.redaction", "content": {"redacts": "$test2:domain"}}, + { + "type": "m.room.redaction", + "content": {"redacts": "$test2:domain"}, + "redacts": "$test2:domain", + }, { "type": "m.room.redaction", "content": {}, @@ -323,7 +334,11 @@ def test_redacts(self) -> None: # After MSC2174, redaction events keep the redacts content key. self.run_test( - {"type": "m.room.redaction", "content": {"redacts": "$test2:domain"}}, + { + "type": "m.room.redaction", + "content": {"redacts": "$test2:domain"}, + "redacts": "$test2:domain", + }, { "type": "m.room.redaction", "content": {"redacts": "$test2:domain"}, diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 6b4cba65d069..013b9ee5504f 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -23,18 +23,24 @@ from synapse.api.constants import RoomEncryptionAlgorithms from synapse.api.errors import Codes, SynapseError +from synapse.appservice import ApplicationService from synapse.handlers.device import DeviceHandler from synapse.server import HomeServer +from synapse.storage.databases.main.appservice import _make_exclusive_regex from synapse.types import JsonDict from synapse.util import Clock from tests import unittest from tests.test_utils import make_awaitable +from tests.unittest import override_config class E2eKeysHandlerTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: - return self.setup_test_homeserver(federation_client=mock.Mock()) + self.appservice_api = mock.Mock() + return self.setup_test_homeserver( + federation_client=mock.Mock(), application_service_api=self.appservice_api + ) def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.handler = hs.get_e2e_keys_handler() @@ -941,3 +947,190 @@ def test_query_all_devices_caches_result(self, device_ids: Iterable[str]) -> Non # The two requests to the local homeserver should be identical. self.assertEqual(response_1, response_2) + + @override_config({"experimental_features": {"msc3983_appservice_otk_claims": True}}) + def test_query_appservice(self) -> None: + local_user = "@boris:" + self.hs.hostname + device_id_1 = "xyz" + fallback_key = {"alg1:k1": "fallback_key1"} + device_id_2 = "abc" + otk = {"alg1:k2": "key2"} + + # Inject an appservice interested in this user. 
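+        # (With MSC3983 enabled, Synapse may ask such an appservice to claim
+        # one-time keys on behalf of users in its exclusive namespaces.)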
+ appservice = ApplicationService( + token="i_am_an_app_service", + id="1234", + namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]}, + # Note: this user does not have to match the regex above + sender="@as_main:test", + ) + self.hs.get_datastores().main.services_cache = [appservice] + self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex( + [appservice] + ) + + # Setup a response, but only for device 2. + self.appservice_api.claim_client_keys.return_value = make_awaitable( + ({local_user: {device_id_2: otk}}, [(local_user, device_id_1, "alg1")]) + ) + + # we shouldn't have any unused fallback keys yet + res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1) + ) + self.assertEqual(res, []) + + self.get_success( + self.handler.upload_keys_for_user( + local_user, + device_id_1, + {"fallback_keys": fallback_key}, + ) + ) + + # we should now have an unused alg1 key + fallback_res = self.get_success( + self.store.get_e2e_unused_fallback_key_types(local_user, device_id_1) + ) + self.assertEqual(fallback_res, ["alg1"]) + + # claiming an OTK when no OTKs are available should ask the appservice, then + # query the fallback keys. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + { + "one_time_keys": { + local_user: {device_id_1: "alg1", device_id_2: "alg1"} + } + }, + timeout=None, + ) + ) + self.assertEqual( + claim_res, + { + "failures": {}, + "one_time_keys": { + local_user: {device_id_1: fallback_key, device_id_2: otk} + }, + }, + ) + + @override_config({"experimental_features": {"msc3984_appservice_key_query": True}}) + def test_query_local_devices_appservice(self) -> None: + """Test that querying of appservices for keys overrides responses from the database.""" + local_user = "@boris:" + self.hs.hostname + device_1 = "abc" + device_2 = "def" + device_3 = "ghi" + + # There are 3 devices: + # + # 1. One which is uploaded to the homeserver. + # 2. One which is uploaded to the homeserver, but a newer copy is returned + # by the appservice. + # 3. One which is only returned by the appservice. + device_key_1: JsonDict = { + "user_id": local_user, + "device_id": device_1, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:abc": "base64+ed25519+key", + "curve25519:abc": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:abc": "base64+signature"}}, + } + device_key_2a: JsonDict = { + "user_id": local_user, + "device_id": device_2, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:def": "base64+ed25519+key", + "curve25519:def": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:def": "base64+signature"}}, + } + + device_key_2b: JsonDict = { + "user_id": local_user, + "device_id": device_2, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + # The device ID is the same (above), but the keys are different. 
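+            # (With MSC3984 enabled, the appservice's copy of the device keys
+            # should take precedence over what was uploaded to the homeserver.)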
+ "keys": { + "ed25519:xyz": "base64+ed25519+key", + "curve25519:xyz": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:xyz": "base64+signature"}}, + } + device_key_3: JsonDict = { + "user_id": local_user, + "device_id": device_3, + "algorithms": [ + "m.olm.curve25519-aes-sha2", + RoomEncryptionAlgorithms.MEGOLM_V1_AES_SHA2, + ], + "keys": { + "ed25519:jkl": "base64+ed25519+key", + "curve25519:jkl": "base64+curve25519+key", + }, + "signatures": {local_user: {"ed25519:jkl": "base64+signature"}}, + } + + # Upload keys for devices 1 & 2a. + self.get_success( + self.handler.upload_keys_for_user( + local_user, device_1, {"device_keys": device_key_1} + ) + ) + self.get_success( + self.handler.upload_keys_for_user( + local_user, device_2, {"device_keys": device_key_2a} + ) + ) + + # Inject an appservice interested in this user. + appservice = ApplicationService( + token="i_am_an_app_service", + id="1234", + namespaces={"users": [{"regex": r"@boris:.+", "exclusive": True}]}, + # Note: this user does not have to match the regex above + sender="@as_main:test", + ) + self.hs.get_datastores().main.services_cache = [appservice] + self.hs.get_datastores().main.exclusive_user_regex = _make_exclusive_regex( + [appservice] + ) + + # Setup a response. + self.appservice_api.query_keys.return_value = make_awaitable( + { + "device_keys": { + local_user: {device_2: device_key_2b, device_3: device_key_3} + } + } + ) + + # Request all devices. + res = self.get_success(self.handler.query_local_devices({local_user: None})) + self.assertIn(local_user, res) + for res_key in res[local_user].values(): + res_key.pop("unsigned", None) + self.assertDictEqual( + res, + { + local_user: { + device_1: device_key_1, + device_2: device_key_2b, + device_3: device_key_3, + } + }, + ) diff --git a/tests/handlers/test_oidc.py b/tests/handlers/test_oidc.py index 951caaa6b3ba..0a8bae54fbea 100644 --- a/tests/handlers/test_oidc.py +++ b/tests/handlers/test_oidc.py @@ -922,7 +922,7 @@ def test_extra_attributes(self) -> None: auth_provider_session_id=None, ) - @override_config({"oidc_config": DEFAULT_CONFIG}) + @override_config({"oidc_config": {**DEFAULT_CONFIG, "enable_registration": True}}) def test_map_userinfo_to_user(self) -> None: """Ensure that mapping the userinfo returned from a provider to an MXID works properly.""" userinfo: dict = { @@ -975,6 +975,21 @@ def test_map_userinfo_to_user(self) -> None: "Mapping provider does not support de-duplicating Matrix IDs", ) + @override_config({"oidc_config": {**DEFAULT_CONFIG, "enable_registration": False}}) + def test_map_userinfo_to_user_does_not_register_new_user(self) -> None: + """Ensures new users are not registered if the enabled registration flag is disabled.""" + userinfo: dict = { + "sub": "test_user", + "username": "test_user", + } + request, _ = self.start_authorization(userinfo) + self.get_success(self.handler.handle_oidc_callback(request)) + self.complete_sso_login.assert_not_called() + self.assertRenderedError( + "mapping_error", + "User does not exist and registrations are disabled", + ) + @override_config({"oidc_config": {**DEFAULT_CONFIG, "allow_existing_users": True}}) def test_map_userinfo_to_existing_user(self) -> None: """Existing users can log in with OpenID Connect when allow_existing_users is True.""" diff --git a/tests/handlers/test_sso.py b/tests/handlers/test_sso.py index d6f43a98fcda..620ae3a4ba54 100644 --- a/tests/handlers/test_sso.py +++ b/tests/handlers/test_sso.py @@ -35,7 +35,7 @@ def make_homeserver(self, reactor: MemoryReactor, 
clock: Clock) -> HomeServer: ) return hs - async def test_set_avatar(self) -> None: + def test_set_avatar(self) -> None: """Tests successfully setting the avatar of a newly created user""" handler = self.hs.get_sso_handler() @@ -54,7 +54,7 @@ async def test_set_avatar(self) -> None: self.assertIsNot(profile["avatar_url"], None) @unittest.override_config({"max_avatar_size": 1}) - async def test_set_avatar_too_big_image(self) -> None: + def test_set_avatar_too_big_image(self) -> None: """Tests that saving an avatar fails when it is too big""" handler = self.hs.get_sso_handler() @@ -66,7 +66,7 @@ async def test_set_avatar_too_big_image(self) -> None: ) @unittest.override_config({"allowed_avatar_mimetypes": ["image/jpeg"]}) - async def test_set_avatar_incorrect_mime_type(self) -> None: + def test_set_avatar_incorrect_mime_type(self) -> None: """Tests that saving an avatar fails when its mime type is not allowed""" handler = self.hs.get_sso_handler() @@ -77,7 +77,7 @@ async def test_set_avatar_incorrect_mime_type(self) -> None: self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) ) - async def test_skip_saving_avatar_when_not_changed(self) -> None: + def test_skip_saving_avatar_when_not_changed(self) -> None: """Tests whether saving of avatar correctly skips if the avatar hasn't changed""" handler = self.hs.get_sso_handler() diff --git a/tests/handlers/test_user_directory.py b/tests/handlers/test_user_directory.py index da4d24082648..15a7dc681854 100644 --- a/tests/handlers/test_user_directory.py +++ b/tests/handlers/test_user_directory.py @@ -792,7 +792,7 @@ async def allow_all(user_profile: UserProfile) -> bool: return False # Configure a spam checker that does not filter any users. - spam_checker = self.hs.get_spam_checker() + spam_checker = self.hs.get_module_api_callbacks().spam_checker spam_checker._check_username_for_spam_callbacks = [allow_all] # The results do not change: diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 870047d0f250..f0f2da65db0b 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -31,7 +31,6 @@ from synapse.api.errors import Codes from synapse.events import EventBase -from synapse.events.spamcheck import load_legacy_spam_checkers from synapse.http.types import QueryParams from synapse.logging.context import make_deferred_yieldable from synapse.media._base import FileInfo @@ -39,6 +38,7 @@ from synapse.media.media_storage import MediaStorage, ReadableFileWrapper from synapse.media.storage_provider import FileStorageProviderBackend from synapse.module_api import ModuleApi +from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers from synapse.rest import admin from synapse.rest.client import login from synapse.server import HomeServer diff --git a/tests/module_api/test_api.py b/tests/module_api/test_api.py index 3a1929691e9c..758b4bc38bdd 100644 --- a/tests/module_api/test_api.py +++ b/tests/module_api/test_api.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Any, Dict
+from typing import Any, Dict, Optional
 from unittest.mock import Mock
 
 from twisted.internet import defer
@@ -21,6 +21,7 @@
 from synapse.api.errors import NotFoundError
 from synapse.events import EventBase
 from synapse.federation.units import Transaction
+from synapse.handlers.device import DeviceHandler
 from synapse.handlers.presence import UserPresenceState
 from synapse.handlers.push_rules import InvalidRuleException
 from synapse.module_api import ModuleApi
@@ -773,6 +774,54 @@ def test_create_room(self) -> None:
         # Check room alias.
         self.assertIsNone(room_alias)
 
+    def test_on_logged_out(self) -> None:
+        """Test that the on_logged_out module hook is properly called when logging out
+        a device, and that related pushers are still available at this time.
+        """
+        device_id = "AAAAAAA"
+        user_id = self.register_user("test_on_logged_out", "secret")
+        self.login("test_on_logged_out", "secret", device_id)
+
+        self.get_success(
+            self.hs.get_pusherpool().add_or_update_pusher(
+                user_id=user_id,
+                device_id=device_id,
+                kind="http",
+                app_id="m.http",
+                app_display_name="HTTP Push Notifications",
+                device_display_name="pushy push",
+                pushkey="a@example.com",
+                lang=None,
+                data={"url": "http://example.com/_matrix/push/v1/notify"},
+            )
+        )
+
+        # Set up a callback counting the number of pushers.
+        number_of_pushers_in_callback: Optional[int] = None
+
+        async def _on_logged_out_mock(
+            user_id: str, device_id: Optional[str], access_token: str
+        ) -> None:
+            nonlocal number_of_pushers_in_callback
+            number_of_pushers_in_callback = len(
+                self.hs.get_pusherpool().pushers[user_id].values()
+            )
+
+        self.module_api.register_password_auth_provider_callbacks(
+            on_logged_out=_on_logged_out_mock
+        )
+
+        # Delete the device.
+        device_handler = self.hs.get_device_handler()
+        assert isinstance(device_handler, DeviceHandler)
+        self.get_success(device_handler.delete_devices(user_id, [device_id]))
+
+        # Check that the callback was called and the pushers still existed.
+        self.assertEqual(number_of_pushers_in_callback, 1)
+
+        # Ensure the pushers were deleted after the callback.
+        self.assertEqual(len(self.hs.get_pusherpool().pushers[user_id].values()), 0)
+
 
 class ModuleApiWorkerTestCase(BaseModuleApiTestCase, BaseMultiWorkerStreamTestCase):
     """For testing ModuleApi functionality in a multi-worker setup"""
diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py
index 46df0102f77a..9501096a7732 100644
--- a/tests/push/test_bulk_push_rule_evaluator.py
+++ b/tests/push/test_bulk_push_rule_evaluator.py
@@ -243,22 +243,28 @@ def test_user_mentions(self) -> None:
         )
 
         # Non-dict mentions should be ignored.
-        mentions: Any
-        for mentions in (None, True, False, 1, "foo", []):
-            self.assertFalse(
-                self._create_and_process(
-                    bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: mentions}
+        #
+        # Avoid C-S validation as these aren't expected.
+        with patch(
+            "synapse.events.validator.EventValidator.validate_new",
+            new=lambda s, event, config: True,
+        ):
+            mentions: Any
+            for mentions in (None, True, False, 1, "foo", []):
+                self.assertFalse(
+                    self._create_and_process(
+                        bulk_evaluator, {EventContentFields.MSC3952_MENTIONS: mentions}
+                    )
                 )
-            )
 
-        # A non-list should be ignored.
-        for mentions in (None, True, False, 1, "foo", {}):
-            self.assertFalse(
-                self._create_and_process(
-                    bulk_evaluator,
-                    {EventContentFields.MSC3952_MENTIONS: {"user_ids": mentions}},
+            # A non-list should be ignored.
+ for mentions in (None, True, False, 1, "foo", {}): + self.assertFalse( + self._create_and_process( + bulk_evaluator, + {EventContentFields.MSC3952_MENTIONS: {"user_ids": mentions}}, + ) ) - ) # The Matrix ID appearing anywhere in the list should notify. self.assertTrue( @@ -291,26 +297,32 @@ def test_user_mentions(self) -> None: ) # Invalid entries in the list are ignored. - self.assertFalse( - self._create_and_process( - bulk_evaluator, - { - EventContentFields.MSC3952_MENTIONS: { - "user_ids": [None, True, False, {}, []] - } - }, + # + # Avoid C-S validation as these aren't expected. + with patch( + "synapse.events.validator.EventValidator.validate_new", + new=lambda s, event, config: True, + ): + self.assertFalse( + self._create_and_process( + bulk_evaluator, + { + EventContentFields.MSC3952_MENTIONS: { + "user_ids": [None, True, False, {}, []] + } + }, + ) ) - ) - self.assertTrue( - self._create_and_process( - bulk_evaluator, - { - EventContentFields.MSC3952_MENTIONS: { - "user_ids": [None, True, False, {}, [], self.alice] - } - }, + self.assertTrue( + self._create_and_process( + bulk_evaluator, + { + EventContentFields.MSC3952_MENTIONS: { + "user_ids": [None, True, False, {}, [], self.alice] + } + }, + ) ) - ) # The legacy push rule should not mention if the mentions field exists. self.assertFalse( @@ -351,14 +363,20 @@ def test_room_mentions(self) -> None: ) # Invalid data should not notify. - mentions: Any - for mentions in (None, False, 1, "foo", [], {}): - self.assertFalse( - self._create_and_process( - bulk_evaluator, - {EventContentFields.MSC3952_MENTIONS: {"room": mentions}}, + # + # Avoid C-S validation as these aren't expected. + with patch( + "synapse.events.validator.EventValidator.validate_new", + new=lambda s, event, config: True, + ): + mentions: Any + for mentions in (None, False, 1, "foo", [], {}): + self.assertFalse( + self._create_and_process( + bulk_evaluator, + {EventContentFields.MSC3952_MENTIONS: {"room": mentions}}, + ) ) - ) # The legacy push rule should not mention if the mentions field exists. self.assertFalse( diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 4ea5472eb475..4b5c96aeaea8 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -105,7 +105,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.hs.get_datastores().main.get_user_by_access_token(self.access_token) ) assert user_tuple is not None - self.token_id = user_tuple.token_id + self.device_id = user_tuple.device_id # We need to add email to account before we can create a pusher. 
self.get_success( @@ -117,7 +117,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: pusher = self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, - access_token=self.token_id, + device_id=self.device_id, kind="email", app_id="m.email", app_display_name="Email Notifications", @@ -141,7 +141,7 @@ def test_need_validated_email(self) -> None: self.get_success_or_raise( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, - access_token=self.token_id, + device_id=self.device_id, kind="email", app_id="m.email", app_display_name="Email Notifications", diff --git a/tests/push/test_http.py b/tests/push/test_http.py index eb013f767057..8a00f491a7bc 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -67,13 +67,13 @@ def test_invalid_configuration(self) -> None: self.hs.get_datastores().main.get_user_by_access_token(access_token) ) assert user_tuple is not None - token_id = user_tuple.token_id + device_id = user_tuple.device_id def test_data(data: Any) -> None: self.get_failure( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", @@ -114,12 +114,12 @@ def test_sends_http(self) -> None: self.hs.get_datastores().main.get_user_by_access_token(access_token) ) assert user_tuple is not None - token_id = user_tuple.token_id + device_id = user_tuple.device_id self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", @@ -235,12 +235,12 @@ def test_sends_high_priority_for_encrypted(self) -> None: self.hs.get_datastores().main.get_user_by_access_token(access_token) ) assert user_tuple is not None - token_id = user_tuple.token_id + device_id = user_tuple.device_id self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", @@ -356,12 +356,12 @@ def test_sends_high_priority_for_one_to_one_only(self) -> None: self.hs.get_datastores().main.get_user_by_access_token(access_token) ) assert user_tuple is not None - token_id = user_tuple.token_id + device_id = user_tuple.device_id self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", @@ -444,12 +444,12 @@ def test_sends_high_priority_for_mention(self) -> None: self.hs.get_datastores().main.get_user_by_access_token(access_token) ) assert user_tuple is not None - token_id = user_tuple.token_id + device_id = user_tuple.device_id self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", @@ -523,12 +523,12 @@ def test_sends_high_priority_for_atroom(self) -> None: self.hs.get_datastores().main.get_user_by_access_token(access_token) ) assert user_tuple is not None - token_id = user_tuple.token_id + device_id = user_tuple.device_id self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", @@ -631,12 +631,12 @@ def 
_test_push_unread_count(self) -> None:
             self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         assert user_tuple is not None
-        token_id = user_tuple.token_id
+        device_id = user_tuple.device_id
 
         self.get_success(
             self.hs.get_pusherpool().add_or_update_pusher(
                 user_id=user_id,
-                access_token=token_id,
+                device_id=device_id,
                 kind="http",
                 app_id="m.http",
                 app_display_name="HTTP Push Notifications",
@@ -767,12 +767,12 @@ def _set_pusher(self, user_id: str, access_token: str, enabled: bool) -> None:
             self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         assert user_tuple is not None
-        token_id = user_tuple.token_id
+        device_id = user_tuple.device_id
 
         self.get_success(
             self.hs.get_pusherpool().add_or_update_pusher(
                 user_id=user_id,
-                access_token=token_id,
+                device_id=device_id,
                 kind="http",
                 app_id="m.http",
                 app_display_name="HTTP Push Notifications",
@@ -781,7 +781,6 @@ def _set_pusher(self, user_id: str, access_token: str, enabled: bool) -> None:
                 lang=None,
                 data={"url": "http://example.com/_matrix/push/v1/notify"},
                 enabled=enabled,
-                device_id=user_tuple.device_id,
             )
         )
 
@@ -898,19 +897,17 @@ def test_null_enabled(self) -> None:
 
     def test_update_different_device_access_token_device_id(self) -> None:
         """Tests that if we create a pusher from one device, then update it from another
-        device, the access token and device ID associated with the pusher stays the
-        same.
+        device, the device ID associated with the pusher stays the same.
         """
         # Create a user with a pusher.
         user_id, access_token = self._make_user_with_pusher("user")
 
-        # Get the token ID for the current access token, since that's what we store in
-        # the pushers table. Also get the device ID from it.
+        # Get the device ID for the current access token, since that's what we store in
+        # the pushers table.
         user_tuple = self.get_success(
             self.hs.get_datastores().main.get_user_by_access_token(access_token)
         )
         assert user_tuple is not None
-        token_id = user_tuple.token_id
         device_id = user_tuple.device_id
 
         # Generate a new access token, and update the pusher with it.
@@ -923,10 +920,9 @@ def test_update_different_device_access_token_device_id(self) -> None:
         )
         pushers: List[PusherConfig] = list(ret)
 
-        # Check that we still have one pusher, and that the access token and device ID
-        # associated with it didn't change.
+        # Check that we still have one pusher, and that the device ID associated with
+        # it didn't change.
self.assertEqual(len(pushers), 1) - self.assertEqual(pushers[0].access_token, token_id) self.assertEqual(pushers[0].device_id, device_id) @override_config({"experimental_features": {"msc3881_enabled": True}}) diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 331de20faf04..a229b2201606 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -14,8 +14,6 @@ from typing import Any, Dict, List, Optional, Union, cast -import frozendict - from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin @@ -318,11 +316,11 @@ def test_event_match_non_body(self) -> None: "pattern should only match at the start/end of the value", ) - # it should work on frozendicts too + # it should work on frozen dictionaries too self._assert_matches( condition, - frozendict.frozendict({"value": "FoobaZ"}), - "patterns should match on frozendicts", + freeze({"value": "FoobaZ"}), + "patterns should match on frozen dictionaries", ) # wildcards should match @@ -425,11 +423,11 @@ def test_exact_event_match_string(self) -> None: "incorrect types should not match", ) - # it should work on frozendicts too + # it should work on frozen dictionaries too self._assert_matches( condition, - frozendict.frozendict({"value": "foobaz"}), - "values should match on frozendicts", + freeze({"value": "foobaz"}), + "values should match on frozen dictionaries", ) def test_exact_event_match_boolean(self) -> None: @@ -546,11 +544,11 @@ def test_exact_event_property_contains(self) -> None: "does not search in a string", ) - # it should work on frozendicts too + # it should work on frozen dictionaries too self._assert_matches( condition, freeze({"value": ["foobaz"]}), - "values should match on frozendicts", + "values should match on frozen dictionaries", ) def test_no_body(self) -> None: diff --git a/tests/replication/_base.py b/tests/replication/_base.py index 46a8e2013e41..0f1a8a145f6f 100644 --- a/tests/replication/_base.py +++ b/tests/replication/_base.py @@ -54,6 +54,10 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): if not hiredis: skip = "Requires hiredis" + if not USE_POSTGRES_FOR_TESTS: + # Redis replication only takes place on Postgres + skip = "Requires Postgres" + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: # build a replication server server_factory = ReplicationStreamProtocolFactory(hs) diff --git a/tests/replication/slave/storage/test_events.py b/tests/replication/slave/storage/test_events.py index 5d24eea0ac6c..98147e704e11 100644 --- a/tests/replication/slave/storage/test_events.py +++ b/tests/replication/slave/storage/test_events.py @@ -412,8 +412,9 @@ def build_event( self.get_success( self.master_store.add_push_actions_to_staging( event.event_id, - {user_id: actions for user_id, actions in push_actions}, + dict(push_actions), {user_id: False for user_id, _ in push_actions}, + False, "main", ) ) diff --git a/tests/replication/tcp/streams/test_account_data.py b/tests/replication/tcp/streams/test_account_data.py index 01df1be0473c..b9075e3f207d 100644 --- a/tests/replication/tcp/streams/test_account_data.py +++ b/tests/replication/tcp/streams/test_account_data.py @@ -37,11 +37,6 @@ def test_update_function_room_account_data_limit(self) -> None: # also one global update self.get_success(store.add_account_data_for_user("test_user", "m.global", {})) - # tell the notifier to catch up to avoid duplicate rows. 
-        # workaround for https://github.com/matrix-org/synapse/issues/7360
-        # FIXME remove this when the above is fixed
-        self.replicate()
-
         # check we're testing what we think we are: no rows should yet have been
         # received
         self.assertEqual([], self.test_handler.received_rdata_rows)
diff --git a/tests/replication/tcp/streams/test_to_device.py b/tests/replication/tcp/streams/test_to_device.py
new file mode 100644
index 000000000000..fb9eac668f19
--- /dev/null
+++ b/tests/replication/tcp/streams/test_to_device.py
@@ -0,0 +1,89 @@
+# Copyright 2023 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+import synapse
+from synapse.replication.tcp.streams._base import _STREAM_UPDATE_TARGET_ROW_COUNT
+from synapse.types import JsonDict
+
+from tests.replication._base import BaseStreamTestCase
+
+logger = logging.getLogger(__name__)
+
+
+class ToDeviceStreamTestCase(BaseStreamTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        synapse.rest.client.login.register_servlets,
+    ]
+
+    def test_to_device_stream(self) -> None:
+        store = self.hs.get_datastores().main
+
+        user1 = self.register_user("user1", "pass")
+        self.login("user1", "pass", "device")
+        user2 = self.register_user("user2", "pass")
+        self.login("user2", "pass", "device")
+
+        # connect to pull the updates related to user creation/login
+        self.reconnect()
+        self.replicate()
+        self.test_handler.received_rdata_rows.clear()
+        # disconnect so we can accumulate the updates without pulling them
+        self.disconnect()
+
+        msg: JsonDict = {}
+        msg["sender"] = "@sender:example.org"
+        msg["type"] = "m.new_device"
+
+        # add messages to the device inbox for user1 up until the
+        # limit defined for a stream update batch
+        for i in range(0, _STREAM_UPDATE_TARGET_ROW_COUNT):
+            msg["content"] = {"device": {}}
+            messages = {user1: {"device": msg}}
+
+            self.get_success(
+                store.add_messages_from_remote_to_device_inbox(
+                    "example.org",
+                    f"{i}",
+                    messages,
+                )
+            )
+
+        # add one more message, for user2 this time
+        # this message would be dropped before fixing #15335
+        msg["content"] = {"device": {}}
+        messages = {user2: {"device": msg}}
+
+        self.get_success(
+            store.add_messages_from_remote_to_device_inbox(
+                "example.org",
+                f"{_STREAM_UPDATE_TARGET_ROW_COUNT}",
+                messages,
+            )
+        )
+
+        # replication is disconnected so we shouldn't get any updates yet
+        self.assertEqual([], self.test_handler.received_rdata_rows)
+
+        # now reconnect to pull the updates
+        self.reconnect()
+        self.replicate()
+
+        # we should receive the fact that we have to_device updates
+        # for user1 and user2
+        received_rows = self.test_handler.received_rdata_rows
+        self.assertEqual(len(received_rows), 2)
+        self.assertEqual(received_rows[0][2].entity, user1)
+        self.assertEqual(received_rows[1][2].entity, user2)
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 0798b021c3d0..dcb3e6669bb9 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -51,12
+51,12 @@ def _create_pusher_and_send_msg(self, localpart: str) -> str: self.hs.get_datastores().main.get_user_by_access_token(access_token) ) assert user_dict is not None - token_id = user_dict.token_id + device_id = user_dict.device_id self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 4b8f889a71ba..b4241ceaf023 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -3047,12 +3047,12 @@ def test_get_pushers(self) -> None: self.store.get_user_by_access_token(other_user_token) ) assert user_tuple is not None - token_id = user_tuple.token_id + device_id = user_tuple.device_id self.get_success( self.hs.get_pusherpool().add_or_update_pusher( user_id=self.other_user, - access_token=token_id, + device_id=device_id, kind="http", app_id="m.http", app_display_name="HTTP Push Notifications", diff --git a/tests/rest/client/test_account.py b/tests/rest/client/test_account.py index 7f675c44a278..ac19f3c6daef 100644 --- a/tests/rest/client/test_account.py +++ b/tests/rest/client/test_account.py @@ -474,6 +474,163 @@ def test_pending_invites(self) -> None: self.assertEqual(len(memberships), 1, memberships) self.assertEqual(memberships[0].room_id, room_id, memberships) + def test_deactivate_account_deletes_server_side_backup_keys(self) -> None: + key_handler = self.hs.get_e2e_room_keys_handler() + room_keys = { + "rooms": { + "!abc:matrix.org": { + "sessions": { + "c0ff33": { + "first_message_index": 1, + "forwarded_count": 1, + "is_verified": False, + "session_data": "SSBBTSBBIEZJU0gK", + } + } + } + } + } + + user_id = self.register_user("missPiggy", "test") + tok = self.login("missPiggy", "test") + + # add some backup keys/versions + version = self.get_success( + key_handler.create_version( + user_id, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "first_version_auth_data", + }, + ) + ) + + self.get_success(key_handler.upload_room_keys(user_id, version, room_keys)) + + version2 = self.get_success( + key_handler.create_version( + user_id, + { + "algorithm": "m.megolm_backup.v1", + "auth_data": "second_version_auth_data", + }, + ) + ) + + self.get_success(key_handler.upload_room_keys(user_id, version2, room_keys)) + + self.deactivate(user_id, tok) + store = self.hs.get_datastores().main + + # Check that the user has been marked as deactivated. 
+        self.assertTrue(self.get_success(store.get_user_deactivated_status(user_id)))
+
+        # Check that there are no entries in `e2e_room_keys` and `e2e_room_keys_versions`
+        res = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys", {"user_id": user_id}, "*", "simple_select"
+            )
+        )
+        self.assertEqual(len(res), 0)
+
+        res2 = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys_versions", {"user_id": user_id}, "*", "simple_select"
+            )
+        )
+        self.assertEqual(len(res2), 0)
+
+    def test_background_update_deletes_deactivated_users_server_side_backup_keys(
+        self,
+    ) -> None:
+        key_handler = self.hs.get_e2e_room_keys_handler()
+        room_keys = {
+            "rooms": {
+                "!abc:matrix.org": {
+                    "sessions": {
+                        "c0ff33": {
+                            "first_message_index": 1,
+                            "forwarded_count": 1,
+                            "is_verified": False,
+                            "session_data": "SSBBTSBBIEZJU0gK",
+                        }
+                    }
+                }
+            }
+        }
+        self.store = self.hs.get_datastores().main
+
+        # create a bunch of users and add keys for them
+        users = []
+        for i in range(0, 20):
+            user_id = self.register_user("missPiggy" + str(i), "test")
+            users.append((user_id,))
+
+            # add some backup keys/versions
+            version = self.get_success(
+                key_handler.create_version(
+                    user_id,
+                    {
+                        "algorithm": "m.megolm_backup.v1",
+                        "auth_data": str(i) + "_version_auth_data",
+                    },
+                )
+            )
+
+            self.get_success(key_handler.upload_room_keys(user_id, version, room_keys))
+
+            version2 = self.get_success(
+                key_handler.create_version(
+                    user_id,
+                    {
+                        "algorithm": "m.megolm_backup.v1",
+                        "auth_data": str(i) + "_version_auth_data",
+                    },
+                )
+            )
+
+            self.get_success(key_handler.upload_room_keys(user_id, version2, room_keys))
+
+        # deactivate most of the users by editing DB
+        self.get_success(
+            self.store.db_pool.simple_update_many(
+                table="users",
+                key_names=("name",),
+                key_values=users[0:18],
+                value_names=("deactivated",),
+                value_values=[(1,) for i in range(1, 19)],
+                desc="",
+            )
+        )
+
+        # run background update
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": "delete_e2e_backup_keys_for_deactivated_users",
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # check that keys are deleted for the deactivated users but not the others
+        res = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys", None, ("user_id",), "simple_select"
+            )
+        )
+        self.assertEqual(len(res), 4)
+
+        res2 = self.get_success(
+            self.hs.get_datastores().main.db_pool.simple_select_list(
+                "e2e_room_keys_versions", None, ("user_id",), "simple_select"
+            )
+        )
+        self.assertEqual(len(res2), 4)
+
     def deactivate(self, user_id: str, tok: str) -> None:
         request_data = {
             "auth": {
diff --git a/tests/rest/client/test_redactions.py b/tests/rest/client/test_redactions.py
index 5dfe44defbb3..84a60c0b0721 100644
--- a/tests/rest/client/test_redactions.py
+++ b/tests/rest/client/test_redactions.py
@@ -16,6 +16,7 @@
 from twisted.test.proto_helpers import MemoryReactor
 
 from synapse.api.constants import EventTypes, RelationTypes
+from synapse.api.room_versions import RoomVersions
 from synapse.rest import admin
 from synapse.rest.client import login, room, sync
 from synapse.server import HomeServer
@@ -74,6 +75,7 @@ def _redact_event(
         event_id: str,
         expect_code: int = 200,
         with_relations: Optional[List[str]] = None,
+        content: Optional[JsonDict] = None,
     ) -> JsonDict:
         """Helper function to send a redaction event.
@@ -81,7 +83,7 @@ def _redact_event(
         """
         path = "/_matrix/client/r0/rooms/%s/redact/%s" % (room_id, event_id)
 
-        request_content = {}
+        request_content = content or {}
         if with_relations:
             request_content["org.matrix.msc3912.with_relations"] = with_relations
 
@@ -92,7 +94,7 @@ def _redact_event(
         return channel.json_body
 
     def _sync_room_timeline(self, access_token: str, room_id: str) -> List[JsonDict]:
-        channel = self.make_request("GET", "sync", access_token=self.mod_access_token)
+        channel = self.make_request("GET", "sync", access_token=access_token)
         self.assertEqual(channel.code, 200)
         room_sync = channel.json_body["rooms"]["join"][room_id]
         return room_sync["timeline"]["events"]
@@ -466,3 +468,36 @@ def test_redact_relations_txn_id_reuse(self) -> None:
         )
         self.assertIn("body", event_dict["content"], event_dict)
         self.assertEqual("I'm in a thread!", event_dict["content"]["body"])
+
+    def test_content_redaction(self) -> None:
+        """MSC2174 moved the redacts property to the content."""
+        # Create a room with the newer room version.
+        room_id = self.helper.create_room_as(
+            self.mod_user_id,
+            tok=self.mod_access_token,
+            room_version=RoomVersions.MSC2176.identifier,
+        )
+
+        # Create an event.
+        b = self.helper.send(room_id=room_id, tok=self.mod_access_token)
+        event_id = b["event_id"]
+
+        # Attempt to redact it with a bogus event ID.
+        self._redact_event(
+            self.mod_access_token,
+            room_id,
+            event_id,
+            expect_code=400,
+            content={"redacts": "foo"},
+        )
+
+        # Redact it for real.
+        self._redact_event(self.mod_access_token, room_id, event_id)
+
+        # Sync the room to get the redaction event.
+        timeline = self._sync_room_timeline(self.mod_access_token, room_id)
+        redact_event = timeline[-1]
+        self.assertEqual(redact_event["type"], EventTypes.Redaction)
+        # The redacts key should be in the content.
+        self.assertNotIn("redacts", redact_event)
+        self.assertEqual(redact_event["content"]["redacts"], event_id)
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index a4900703c415..4d39c89f6f19 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -814,7 +814,9 @@ async def user_may_join_room(
             return False
 
         join_mock = Mock(side_effect=user_may_join_room)
-        self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock)
+        self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+            join_mock
+        )
 
         channel = self.make_request(
             "POST",
@@ -840,7 +842,9 @@ async def user_may_join_room_codes(
             return Codes.CONSENT_NOT_GIVEN
 
         join_mock = Mock(side_effect=user_may_join_room_codes)
-        self.hs.get_spam_checker()._user_may_join_room_callbacks.append(join_mock)
+        self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+            join_mock
+        )
 
         channel = self.make_request(
             "POST",
@@ -1162,7 +1166,9 @@ async def user_may_join_room(
         # `spec` argument is needed for this function mock to have `__qualname__`, which
         # is needed for `Measure` metrics buried in SpamChecker.
         callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None)
-        self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock)
+        self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+            callback_mock
+        )
 
         # Join a first room, without being invited to it.
         self.helper.join(self.room1, self.user2, tok=self.tok2)
@@ -1227,7 +1233,9 @@ async def user_may_join_room(
         # `spec` argument is needed for this function mock to have `__qualname__`, which
         # is needed for `Measure` metrics buried in SpamChecker.
         callback_mock = Mock(side_effect=user_may_join_room, spec=lambda *x: None)
-        self.hs.get_spam_checker()._user_may_join_room_callbacks.append(callback_mock)
+        self.hs.get_module_api_callbacks().spam_checker._user_may_join_room_callbacks.append(
+            callback_mock
+        )
 
         # Join a first room, without being invited to it.
         self.helper.join(self.room1, self.user2, tok=self.tok2)
@@ -1643,7 +1651,7 @@ async def check_event_for_spam(
 
         spam_checker = SpamCheck()
 
-        self.hs.get_spam_checker()._check_event_for_spam_callbacks.append(
+        self.hs.get_module_api_callbacks().spam_checker._check_event_for_spam_callbacks.append(
             spam_checker.check_event_for_spam
         )
 
@@ -3381,7 +3389,9 @@ def test_threepid_invite_spamcheck_deprecated(self) -> None:
         # `spec` argument is needed for this function mock to have `__qualname__`, which
         # is needed for `Measure` metrics buried in SpamChecker.
         mock = Mock(return_value=make_awaitable(True), spec=lambda *x: None)
-        self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock)
+        self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append(
+            mock
+        )
 
         # Send a 3PID invite into the room and check that it succeeded.
         email_to_invite = "teresa@example.com"
@@ -3446,7 +3456,9 @@ def test_threepid_invite_spamcheck(self) -> None:
             return_value=make_awaitable(synapse.module_api.NOT_SPAM),
             spec=lambda *x: None,
         )
-        self.hs.get_spam_checker()._user_may_send_3pid_invite_callbacks.append(mock)
+        self.hs.get_module_api_callbacks().spam_checker._user_may_send_3pid_invite_callbacks.append(
+            mock
+        )
 
         # Send a 3PID invite into the room and check that it succeeded.
         email_to_invite = "teresa@example.com"
diff --git a/tests/server.py b/tests/server.py
index 5de97227669c..a49dc90e3272 100644
--- a/tests/server.py
+++ b/tests/server.py
@@ -16,6 +16,7 @@
 import logging
 import os
 import os.path
+import sqlite3
 import time
 import uuid
 import warnings
@@ -72,14 +73,16 @@
 from synapse.config.database import DatabaseConnectionConfig
 from synapse.config.homeserver import HomeServerConfig
 from synapse.events.presence_router import load_legacy_presence_router
-from synapse.events.spamcheck import load_legacy_spam_checkers
 from synapse.events.third_party_rules import load_legacy_third_party_event_rules
 from synapse.handlers.auth import load_legacy_password_auth_providers
 from synapse.http.site import SynapseRequest
 from synapse.logging.context import ContextResourceUsage
+from synapse.module_api.callbacks.spamchecker_callbacks import load_legacy_spam_checkers
 from synapse.server import HomeServer
 from synapse.storage import DataStore
+from synapse.storage.database import LoggingDatabaseConnection
 from synapse.storage.engines import PostgresEngine, create_engine
+from synapse.storage.prepare_database import prepare_database
 from synapse.types import ISynapseReactor, JsonDict
 from synapse.util import Clock
 
@@ -104,6 +107,10 @@
 # the type of thing that can be passed into `make_request` in the headers list
 CustomHeaderType = Tuple[Union[str, bytes], Union[str, bytes]]
 
+# A pre-prepared SQLite DB that is used as a template when creating a new SQLite
+# DB for each test run. This dramatically speeds up test setup when using SQLite.
+PREPPED_SQLITE_DB_CONN: Optional[LoggingDatabaseConnection] = None + class TimedOutException(Exception): """ @@ -899,6 +906,22 @@ def setup_test_homeserver( "args": {"database": test_db_location, "cp_min": 1, "cp_max": 1}, } + # Check if we have set up a DB that we can use as a template. + global PREPPED_SQLITE_DB_CONN + if PREPPED_SQLITE_DB_CONN is None: + temp_engine = create_engine(database_config) + PREPPED_SQLITE_DB_CONN = LoggingDatabaseConnection( + sqlite3.connect(":memory:"), temp_engine, "PREPPED_CONN" + ) + + database = DatabaseConnectionConfig("master", database_config) + config.database.databases = [database] + prepare_database( + PREPPED_SQLITE_DB_CONN, create_engine(database_config), config + ) + + database_config["_TEST_PREPPED_CONN"] = PREPPED_SQLITE_DB_CONN + if "db_txn_limit" in kwargs: database_config["txn_limit"] = kwargs["db_txn_limit"] @@ -983,7 +1006,9 @@ def cleanup() -> None: dropped = True except psycopg2.OperationalError as e: warnings.warn( - "Couldn't drop old db: " + str(e), category=UserWarning + "Couldn't drop old db: " + str(e), + category=UserWarning, + stacklevel=2, ) time.sleep(0.5) @@ -991,7 +1016,11 @@ def cleanup() -> None: db_conn.close() if not dropped: - warnings.warn("Failed to drop old DB.", category=UserWarning) + warnings.warn( + "Failed to drop old DB.", + category=UserWarning, + stacklevel=2, + ) if not LEAVE_DB: # Register the cleanup hook diff --git a/tests/storage/test_event_federation.py b/tests/storage/test_event_federation.py index 3e1984c15cf0..81e50bdd5523 100644 --- a/tests/storage/test_event_federation.py +++ b/tests/storage/test_event_federation.py @@ -1143,19 +1143,24 @@ def test_get_event_ids_to_not_pull_from_backoff(self) -> None: tok = self.login("alice", "test") room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + failure_time = self.clock.time_msec() self.get_success( self.store.record_event_failed_pull_attempt( room_id, "$failed_event_id", "fake cause" ) ) - event_ids_to_backoff = self.get_success( + event_ids_with_backoff = self.get_success( self.store.get_event_ids_to_not_pull_from_backoff( room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"] ) ) - self.assertEqual(event_ids_to_backoff, ["$failed_event_id"]) + self.assertEqual( + event_ids_with_backoff, + # We expect a 2^1 hour backoff after a single failed attempt. + {"$failed_event_id": failure_time + 2 * 60 * 60 * 1000}, + ) def test_get_event_ids_to_not_pull_from_backoff_retry_after_backoff_duration( self, @@ -1179,14 +1184,14 @@ def test_get_event_ids_to_not_pull_from_backoff_retry_after_backoff_duration( # attempt (2^1 hours). self.reactor.advance(datetime.timedelta(hours=2).total_seconds()) - event_ids_to_backoff = self.get_success( + event_ids_with_backoff = self.get_success( self.store.get_event_ids_to_not_pull_from_backoff( room_id=room_id, event_ids=["$failed_event_id", "$normal_event_id"] ) ) # Since this function only returns events we should backoff from, time has # elapsed past the backoff range so there is no events to backoff from. 
- self.assertEqual(event_ids_to_backoff, []) + self.assertEqual(event_ids_with_backoff, {}) @attr.s(auto_attribs=True) diff --git a/tests/storage/test_keys.py b/tests/storage/test_keys.py index ba68171ad7fb..5901d80f26d7 100644 --- a/tests/storage/test_keys.py +++ b/tests/storage/test_keys.py @@ -46,10 +46,10 @@ def test_get_server_verify_keys(self) -> None: store.store_server_verify_keys( "from_server", 10, - [ - ("server1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("server1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + { + ("server1", key_id_1): FetchKeyResult(KEY_1, 100), + ("server1", key_id_2): FetchKeyResult(KEY_2, 200), + }, ) ) @@ -90,10 +90,10 @@ def test_cache(self) -> None: store.store_server_verify_keys( "from_server", 0, - [ - ("srv1", key_id_1, FetchKeyResult(KEY_1, 100)), - ("srv1", key_id_2, FetchKeyResult(KEY_2, 200)), - ], + { + ("srv1", key_id_1): FetchKeyResult(KEY_1, 100), + ("srv1", key_id_2): FetchKeyResult(KEY_2, 200), + }, ) ) @@ -119,7 +119,7 @@ def test_cache(self) -> None: signedjson.key.generate_signing_key("key2") ) d = store.store_server_verify_keys( - "from_server", 10, [("srv1", key_id_2, FetchKeyResult(new_key_2, 300))] + "from_server", 10, {("srv1", key_id_2): FetchKeyResult(new_key_2, 300)} ) self.get_success(d) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 041a9787e0dd..5d9e43f9188c 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -14,7 +14,7 @@ import logging -from frozendict import frozendict +from immutabledict import immutabledict from twisted.test.proto_helpers import MemoryReactor @@ -200,7 +200,7 @@ def test_get_state_for_event(self) -> None: self.storage.state.get_state_for_event( e5.event_id, state_filter=StateFilter( - types=frozendict( + types=immutabledict( {EventTypes.Member: frozenset({self.u_alice.to_string()})} ), include_others=True, @@ -222,7 +222,7 @@ def test_get_state_for_event(self) -> None: self.storage.state.get_state_for_event( e5.event_id, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset()}), + types=immutabledict({EventTypes.Member: frozenset()}), include_others=True, ), ) @@ -248,7 +248,8 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset()}), include_others=True + types=immutabledict({EventTypes.Member: frozenset()}), + include_others=True, ), ) @@ -265,7 +266,8 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset()}), include_others=True + types=immutabledict({EventTypes.Member: frozenset()}), + include_others=True, ), ) @@ -278,7 +280,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: None}), include_others=True + types=immutabledict({EventTypes.Member: None}), include_others=True ), ) @@ -295,7 +297,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: None}), include_others=True + types=immutabledict({EventTypes.Member: None}), include_others=True ), ) @@ -315,7 +317,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset({e5.state_key})}), + 
types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}), include_others=True, ), ) @@ -333,7 +335,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset({e5.state_key})}), + types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}), include_others=True, ), ) @@ -347,7 +349,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset({e5.state_key})}), + types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}), include_others=False, ), ) @@ -398,7 +400,8 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset()}), include_others=True + types=immutabledict({EventTypes.Member: frozenset()}), + include_others=True, ), ) @@ -410,7 +413,8 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset()}), include_others=True + types=immutabledict({EventTypes.Member: frozenset()}), + include_others=True, ), ) @@ -423,7 +427,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: None}), include_others=True + types=immutabledict({EventTypes.Member: None}), include_others=True ), ) @@ -434,7 +438,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: None}), include_others=True + types=immutabledict({EventTypes.Member: None}), include_others=True ), ) @@ -453,7 +457,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset({e5.state_key})}), + types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}), include_others=True, ), ) @@ -465,7 +469,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset({e5.state_key})}), + types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}), include_others=True, ), ) @@ -479,7 +483,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset({e5.state_key})}), + types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}), include_others=False, ), ) @@ -491,7 +495,7 @@ def test_get_state_for_event(self) -> None: self.state_datastore._state_group_members_cache, group, state_filter=StateFilter( - types=frozendict({EventTypes.Member: frozenset({e5.state_key})}), + types=immutabledict({EventTypes.Member: frozenset({e5.state_key})}), include_others=False, ), ) diff --git a/tests/types/test_state.py b/tests/types/test_state.py index eb809f9fb739..1d89582c448d 100644 --- a/tests/types/test_state.py +++ b/tests/types/test_state.py @@ -1,4 +1,4 @@ -from frozendict import frozendict +from immutabledict import immutabledict from synapse.api.constants import EventTypes from synapse.types.state import StateFilter @@ -172,7 +172,7 @@ def 
test_state_filter_difference_include_other_minus_no_include_other(self) -> N
                 },
                 include_others=False,
             ),
-            StateFilter(types=frozendict(), include_others=True),
+            StateFilter(types=immutabledict(), include_others=True),
         )
 
         # (wildcard on state keys) - (no state keys)
@@ -188,7 +188,7 @@ def test_state_filter_difference_include_other_minus_no_include_other(self) -> N
                 include_others=False,
             ),
             StateFilter(
-                types=frozendict(),
+                types=immutabledict(),
                 include_others=True,
             ),
         )
@@ -279,7 +279,7 @@ def test_state_filter_difference_include_other_minus_include_other(self) -> None
                 {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
                 include_others=True,
             ),
-            StateFilter(types=frozendict(), include_others=False),
+            StateFilter(types=immutabledict(), include_others=False),
         )
 
         # (wildcard on state keys) - (specific state keys)
@@ -332,7 +332,7 @@ def test_state_filter_difference_include_other_minus_include_other(self) -> None
                 include_others=True,
             ),
             StateFilter(
-                types=frozendict(),
+                types=immutabledict(),
                 include_others=False,
             ),
         )
@@ -403,7 +403,7 @@ def test_state_filter_difference_no_include_other_minus_include_other(self) -> N
                 {EventTypes.Member: None, EventTypes.CanonicalAlias: None},
                 include_others=True,
             ),
-            StateFilter(types=frozendict(), include_others=False),
+            StateFilter(types=immutabledict(), include_others=False),
         )
 
         # (wildcard on state keys) - (specific state keys)
@@ -450,7 +450,7 @@ def test_state_filter_difference_no_include_other_minus_include_other(self) -> N
                 include_others=True,
             ),
             StateFilter(
-                types=frozendict(),
+                types=immutabledict(),
                 include_others=False,
             ),
         )
diff --git a/tests/unittest.py b/tests/unittest.py
index f9160faa1d0c..93fee1c0e6e4 100644
--- a/tests/unittest.py
+++ b/tests/unittest.py
@@ -146,6 +146,9 @@ def setUp(orig: Callable[[], R]) -> R:
                     % (current_context(),)
                 )
 
+            # Disable GC for the duration of the test. See below for why.
+            gc.disable()
+
             old_level = logging.getLogger().level
 
         if level is not None and old_level != level:
@@ -163,12 +166,19 @@ def tearDown(orig: Callable[[], R]) -> R:
 
             return orig()
 
+        # We want to force a GC to work around problems with deferreds leaking
+        # logcontexts when they are GCed (see the logcontext docs).
+        #
+        # The easiest way to do this would be to do a full GC after each test
+        # run, but that is very expensive. Instead, we disable GC (above) for
+        # the duration of the test so that we only need to run a gen-0 GC, which
+        # is a lot quicker.
+
         @around(self)
         def tearDown(orig: Callable[[], R]) -> R:
             ret = orig()
-            # force a GC to workaround problems with deferreds leaking logcontexts when
-            # they are GCed (see the logcontext docs)
-            gc.collect()
+            gc.collect(0)
+            gc.enable()
 
             set_current_context(SENTINEL_CONTEXT)
 
             return ret
@@ -783,16 +793,12 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
             hs.get_datastores().main.store_server_verify_keys(
                 from_server=self.OTHER_SERVER_NAME,
                 ts_added_ms=clock.time_msec(),
-                verify_keys=[
-                    (
-                        self.OTHER_SERVER_NAME,
-                        verify_key_id,
-                        FetchKeyResult(
-                            verify_key=verify_key,
-                            valid_until_ts=clock.time_msec() + 10000,
-                        ),
-                    )
-                ],
+                verify_keys={
+                    (self.OTHER_SERVER_NAME, verify_key_id): FetchKeyResult(
+                        verify_key=verify_key,
+                        valid_until_ts=clock.time_msec() + 10000,
+                    ),
+                },
             )
         )
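The `tests/unittest.py` change above relies on a CPython property that is easy to miss: objects are only promoted out of generation 0 when they survive a collection, so with automatic GC disabled for the whole test, a cheap `gc.collect(0)` in teardown still sees everything the test allocated. Below is a minimal, self-contained sketch of that pattern outside the Synapse harness; the `GcScopedTestCase` name and the example test are hypothetical illustrations, not part of this changeset.

```python
import gc
import unittest


class GcScopedTestCase(unittest.TestCase):
    """Hypothetical base class showing the disable-GC-per-test pattern."""

    def setUp(self) -> None:
        # With automatic GC off, no collection runs mid-test, so every object
        # the test allocates stays in generation 0.
        gc.disable()
        # Re-enable GC even if tearDown does not run (e.g. a subclass setUp
        # fails); addCleanup callbacks run after tearDown.
        self.addCleanup(gc.enable)

    def tearDown(self) -> None:
        # A generation-0 sweep now covers all of this test's garbage, so
        # finalizers (e.g. on leaked Deferreds) run promptly without the
        # cost of a full gc.collect().
        gc.collect(0)


class ExampleTest(GcScopedTestCase):
    def test_allocates_garbage(self) -> None:
        data = [object() for _ in range(1000)]
        self.assertEqual(len(data), 1000)
```

The trade-off is the one the diff's comment spells out: a full `gc.collect()` after every test is correct but expensive, and the gen-0 pass is only sufficient because promotion cannot happen while the collector is disabled.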