diff --git a/.ci/scripts/postgres_exec.py b/.ci/scripts/postgres_exec.py deleted file mode 100755 index 0f39a336d52d..000000000000 --- a/.ci/scripts/postgres_exec.py +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import sys - -import psycopg2 - -# a very simple replacment for `psql`, to make up for the lack of the postgres client -# libraries in the synapse docker image. - -# We use "postgres" as a database because it's bound to exist and the "synapse" one -# doesn't exist yet. -db_conn = psycopg2.connect( - user="postgres", host="localhost", password="postgres", dbname="postgres" -) -db_conn.autocommit = True -cur = db_conn.cursor() -for c in sys.argv[1:]: - cur.execute(c) diff --git a/.ci/scripts/test_export_data_command.sh b/.ci/scripts/test_export_data_command.sh index 033fd3e24e09..9f6c49acff73 100755 --- a/.ci/scripts/test_export_data_command.sh +++ b/.ci/scripts/test_export_data_command.sh @@ -32,7 +32,7 @@ else fi # Create the PostgreSQL database. -poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse" +psql -c "CREATE DATABASE synapse" # Port the SQLite databse to postgres so we can check command works against postgres echo "+++ Port SQLite3 databse to postgres" diff --git a/.ci/scripts/test_synapse_port_db.sh b/.ci/scripts/test_synapse_port_db.sh index b07a6b5d0862..8cc41d3dca4f 100755 --- a/.ci/scripts/test_synapse_port_db.sh +++ b/.ci/scripts/test_synapse_port_db.sh @@ -2,27 +2,27 @@ # # Test script for 'synapse_port_db'. # - configures synapse and a postgres server. -# - runs the port script on a prepopulated test sqlite db -# - also runs it against an new sqlite db +# - runs the port script on a prepopulated test sqlite db. Checks that the +# return code is zero. +# - reruns the port script on the same sqlite db, targetting the same postgres db. +# Checks that the return code is zero. +# - runs the port script against a new sqlite db. Checks the return code is zero. # # Expects Synapse to have been already installed with `poetry install --extras postgres`. # Expects `poetry` to be available on the `PATH`. -set -xe +set -xe -o pipefail cd "$(dirname "$0")/../.." echo "--- Generate the signing key" - -# Generate the server's signing key. poetry run synapse_homeserver --generate-keys -c .ci/sqlite-config.yaml echo "--- Prepare test database" - -# Make sure the SQLite3 database is using the latest schema and has no pending background update. +# Make sure the SQLite3 database is using the latest schema and has no pending background updates. poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # Create the PostgreSQL database. 
-poetry run .ci/scripts/postgres_exec.py "CREATE DATABASE synapse" +psql -c "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against test database" # TODO: this invocation of synapse_port_db (and others below) used to be prepended with `coverage run`, @@ -45,9 +45,23 @@ rm .ci/test_db.db poetry run update_synapse_database --database-config .ci/sqlite-config.yaml --run-background-updates # re-create the PostgreSQL database. -poetry run .ci/scripts/postgres_exec.py \ - "DROP DATABASE synapse" \ - "CREATE DATABASE synapse" +psql \ + -c "DROP DATABASE synapse" \ + -c "CREATE DATABASE synapse" echo "+++ Run synapse_port_db against empty database" poetry run synapse_port_db --sqlite-database .ci/test_db.db --postgres-config .ci/postgres-config.yaml + +echo "--- Create a brand new postgres database from schema" +cp .ci/postgres-config.yaml .ci/postgres-config-unported.yaml +sed -i -e 's/database: synapse/database: synapse_unported/' .ci/postgres-config-unported.yaml +psql -c "CREATE DATABASE synapse_unported" +poetry run update_synapse_database --database-config .ci/postgres-config-unported.yaml --run-background-updates + +echo "+++ Comparing ported schema with unported schema" +# Ignore the tables that portdb creates. (Should it tidy them up when the porting is completed?) +psql synapse -c "DROP TABLE port_from_sqlite3;" +pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse_unported > unported.sql +pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner synapse > ported.sql +# By default, `diff` returns zero if there are no changes and nonzero otherwise +diff -u unported.sql ported.sql | tee schema_diff \ No newline at end of file diff --git a/.dockerignore b/.dockerignore index 8eb1e4df8a9f..5670b8c15bf2 100644 --- a/.dockerignore +++ b/.dockerignore @@ -11,5 +11,6 @@ !build_rust.py rust/target +synapse/*.so **/__pycache__ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a5a217d01521..91a080cca0e3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -32,9 +32,11 @@ jobs: steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - - run: pip install . 
- - run: scripts-dev/generate_sample_config.sh --check - - run: scripts-dev/config-lint.sh + - uses: matrix-org/setup-python-poetry@v1 + with: + extras: "all" + - run: poetry run scripts-dev/generate_sample_config.sh --check + - run: poetry run scripts-dev/config-lint.sh check-schema-delta: runs-on: ubuntu-latest @@ -76,7 +78,6 @@ jobs: - uses: actions/checkout@v2 with: ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 - uses: matrix-org/setup-python-poetry@v1 with: extras: "all" @@ -361,18 +362,22 @@ jobs: steps: - uses: actions/checkout@v2 - - run: sudo apt-get -qq install xmlsec1 + - run: sudo apt-get -qq install xmlsec1 postgresql-client - uses: matrix-org/setup-python-poetry@v1 with: extras: "postgres" - run: .ci/scripts/test_export_data_command.sh + env: + PGHOST: localhost + PGUSER: postgres + PGPASSWORD: postgres + PGDATABASE: postgres + portdb: if: ${{ !failure() && !cancelled() }} # Allow previous steps to be skipped, but not fail needs: linting-done runs-on: ubuntu-latest - env: - TOP: ${{ github.workspace }} strategy: matrix: include: @@ -398,12 +403,27 @@ jobs: steps: - uses: actions/checkout@v2 - - run: sudo apt-get -qq install xmlsec1 + - run: sudo apt-get -qq install xmlsec1 postgresql-client - uses: matrix-org/setup-python-poetry@v1 with: python-version: ${{ matrix.python-version }} extras: "postgres" - run: .ci/scripts/test_synapse_port_db.sh + id: run_tester_script + env: + PGHOST: localhost + PGUSER: postgres + PGPASSWORD: postgres + PGDATABASE: postgres + - name: "Upload schema differences" + uses: actions/upload-artifact@v3 + if: ${{ failure() && !cancelled() && steps.run_tester_script.outcome == 'failure' }} + with: + name: Schema dumps + path: | + unported.sql + ported.sql + schema_diff complement: if: "${{ !failure() && !cancelled() }}" diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 000000000000..bf96e7743de0 --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1 @@ +group_imports = "StdExternalCrate" diff --git a/CHANGES.md b/CHANGES.md index be44903bfe83..fb91bc5f20c0 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,92 @@ +Synapse 1.68.0rc1 (2022-09-20) +============================== + +Please note that Synapse will now refuse to start if configured to use a version of SQLite earlier than 3.27. + +In addition, please note that installing Synapse from a source checkout now requires a recent Rust compiler. +Those using packages will not be affected. On most platforms, installing with `pip install matrix-synapse` will not be affected. +See the [upgrade notes](https://matrix-org.github.io/synapse/v1.68/upgrade.html#upgrading-to-v1670). + + +Features +-------- + +- Keep track of when we fail to process a pulled event over federation so we can intelligently back off in the future. ([\#13589](https://github.com/matrix-org/synapse/issues/13589), [\#13814](https://github.com/matrix-org/synapse/issues/13814)) +- Add an [admin API endpoint to fetch messages within a particular window of time](https://matrix-org.github.io/synapse/v1.68/admin_api/rooms.html#room-messages-api). ([\#13672](https://github.com/matrix-org/synapse/issues/13672)) +- Add an [admin API endpoint to find a user based on their external ID in an auth provider](https://matrix-org.github.io/synapse/v1.68/admin_api/user_admin_api.html#find-a-user-based-on-their-id-in-an-auth-provider). ([\#13810](https://github.com/matrix-org/synapse/issues/13810)) +- Cancel the processing of key query requests when they time out. 
([\#13680](https://github.com/matrix-org/synapse/issues/13680)) +- Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken), [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status), [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). ([\#13687](https://github.com/matrix-org/synapse/issues/13687), [\#13736](https://github.com/matrix-org/synapse/issues/13736)) +- Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. ([\#13741](https://github.com/matrix-org/synapse/issues/13741)) +- Add a `listeners[x].request_id_header` configuration option to specify which request header to extract and use as the request ID in order to correlate requests from a reverse proxy. ([\#13801](https://github.com/matrix-org/synapse/issues/13801)) + + +Bugfixes +-------- + +- Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). ([\#13506](https://github.com/matrix-org/synapse/issues/13506)) +- Fix a long-standing bug where previously rejected events could end up in room state because they pass auth checks given the current state of the room. ([\#13723](https://github.com/matrix-org/synapse/issues/13723)) +- Fix a long-standing bug where Synapse fails to start if a signing key file contains an empty line. ([\#13738](https://github.com/matrix-org/synapse/issues/13738)) +- Fix a long-standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. ([\#13746](https://github.com/matrix-org/synapse/issues/13746)) +- Fix a long-standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. ([\#13749](https://github.com/matrix-org/synapse/issues/13749), [\#13826](https://github.com/matrix-org/synapse/issues/13826)) +- Fix a long-standing bug that could cause stale caches in some rare cases on the first startup of Synapse with replication. ([\#13766](https://github.com/matrix-org/synapse/issues/13766)) +- Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. ([\#13789](https://github.com/matrix-org/synapse/issues/13789)) +- Delete associated data from `event_failed_pull_attempts`, `insertion_events`, `insertion_event_extremities`, `insertion_event_extremities`, `insertion_event_extremities` when purging the room. ([\#13825](https://github.com/matrix-org/synapse/issues/13825)) + + +Improved Documentation +---------------------- + +- Note that `libpq` is required on ARM-based Macs. ([\#13480](https://github.com/matrix-org/synapse/issues/13480)) +- Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. 
The documentation was incorrect since Synapse v1.22.0. ([\#13726](https://github.com/matrix-org/synapse/issues/13726)) +- Fix a typo in the documentation for the login ratelimiting configuration. ([\#13727](https://github.com/matrix-org/synapse/issues/13727)) +- Define Synapse's compatability policy for SQLite versions. ([\#13728](https://github.com/matrix-org/synapse/issues/13728)) +- Add docs for common fix of deleting the `matrix_synapse.egg-info/` directory for fixing Python dependency problems. ([\#13785](https://github.com/matrix-org/synapse/issues/13785)) +- Update request log format documentation to mention the format used when the authenticated user is controlling another user. ([\#13794](https://github.com/matrix-org/synapse/issues/13794)) + + +Deprecations and Removals +------------------------- + +- Synapse will now refuse to start if configured to use SQLite < 3.27. ([\#13760](https://github.com/matrix-org/synapse/issues/13760)) +- Don't include redundant `prev_state` in new events. Contributed by Denis Kariakin (@dakariakin). ([\#13791](https://github.com/matrix-org/synapse/issues/13791)) + + +Internal Changes +---------------- + +- Add a stub Rust crate. ([\#12595](https://github.com/matrix-org/synapse/issues/12595), [\#13734](https://github.com/matrix-org/synapse/issues/13734), [\#13735](https://github.com/matrix-org/synapse/issues/13735), [\#13743](https://github.com/matrix-org/synapse/issues/13743), [\#13763](https://github.com/matrix-org/synapse/issues/13763), [\#13769](https://github.com/matrix-org/synapse/issues/13769), [\#13778](https://github.com/matrix-org/synapse/issues/13778)) +- Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. ([\#13162](https://github.com/matrix-org/synapse/issues/13162)) +- Add and populate the `event_stream_ordering` column on the `receipts` table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). ([\#13703](https://github.com/matrix-org/synapse/issues/13703)) +- Rename the `EventFormatVersions` enum values so that they line up with room version numbers. ([\#13706](https://github.com/matrix-org/synapse/issues/13706)) +- Update trial old deps CI to use Poetry 1.2.0. ([\#13707](https://github.com/matrix-org/synapse/issues/13707), [\#13725](https://github.com/matrix-org/synapse/issues/13725)) +- Add experimental configuration option to allow disabling legacy Prometheus metric names. ([\#13714](https://github.com/matrix-org/synapse/issues/13714), [\#13717](https://github.com/matrix-org/synapse/issues/13717), [\#13718](https://github.com/matrix-org/synapse/issues/13718)) +- Fix typechecking with latest types-jsonschema. ([\#13724](https://github.com/matrix-org/synapse/issues/13724)) +- Strip number suffix from instance name to consolidate services that traces are spread over. ([\#13729](https://github.com/matrix-org/synapse/issues/13729)) +- Instrument `get_metadata_for_events` for understandable traces in Jaeger. ([\#13730](https://github.com/matrix-org/synapse/issues/13730)) +- Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). ([\#13745](https://github.com/matrix-org/synapse/issues/13745)) +- Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. 
([\#13748](https://github.com/matrix-org/synapse/issues/13748)) +- Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. ([\#13750](https://github.com/matrix-org/synapse/issues/13750)) +- Use an additional database query when persisting receipts. ([\#13752](https://github.com/matrix-org/synapse/issues/13752)) +- Preparatory work for storing thread IDs for notifications and receipts. ([\#13753](https://github.com/matrix-org/synapse/issues/13753)) +- Re-type hint some collections as read-only. ([\#13754](https://github.com/matrix-org/synapse/issues/13754)) +- Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. ([\#13756](https://github.com/matrix-org/synapse/issues/13756)) +- Add a check for editable installs if the Rust library needs rebuilding. ([\#13759](https://github.com/matrix-org/synapse/issues/13759)) +- Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. ([\#13761](https://github.com/matrix-org/synapse/issues/13761)) +- Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). ([\#13765](https://github.com/matrix-org/synapse/issues/13765)) +- Update the script which makes full schema dumps. ([\#13770](https://github.com/matrix-org/synapse/issues/13770)) +- Deduplicate `is_server_notices_room`. ([\#13780](https://github.com/matrix-org/synapse/issues/13780)) +- Simplify the dependency DAG in the tests workflow. ([\#13784](https://github.com/matrix-org/synapse/issues/13784)) +- Remove an old, incorrect migration file. ([\#13788](https://github.com/matrix-org/synapse/issues/13788)) +- Remove unused method in `synapse.api.auth.Auth`. ([\#13795](https://github.com/matrix-org/synapse/issues/13795)) +- Fix a memory leak when running the unit tests. ([\#13798](https://github.com/matrix-org/synapse/issues/13798)) +- Use partial indices on SQLite. ([\#13802](https://github.com/matrix-org/synapse/issues/13802)) +- Check that portdb generates the same postgres schema as that in the source tree. ([\#13808](https://github.com/matrix-org/synapse/issues/13808)) +- Fix Docker build when Rust .so has been build locally first. ([\#13811](https://github.com/matrix-org/synapse/issues/13811)) +- Complement: Initialise the Postgres database directly inside the target image instead of the base Postgres image to fix building using Buildah. ([\#13819](https://github.com/matrix-org/synapse/issues/13819)) +- Support providing an index predicate clause when doing upserts. ([\#13822](https://github.com/matrix-org/synapse/issues/13822)) +- Minor speedups to linting in CI. ([\#13827](https://github.com/matrix-org/synapse/issues/13827)) + + Synapse 1.67.0 (2022-09-13) =========================== diff --git a/changelog.d/12595.misc b/changelog.d/12595.misc deleted file mode 100644 index 2e0dd68a0f94..000000000000 --- a/changelog.d/12595.misc +++ /dev/null @@ -1 +0,0 @@ -Add a stub Rust crate. diff --git a/changelog.d/13162.misc b/changelog.d/13162.misc deleted file mode 100644 index b0d7c05e749c..000000000000 --- a/changelog.d/13162.misc +++ /dev/null @@ -1 +0,0 @@ -Bump the minimum dependency of `matrix_common` to 1.3.0 to make use of the `MXCUri` class. Use `MXCUri` to simplify media retention test code. 
\ No newline at end of file diff --git a/changelog.d/13480.doc b/changelog.d/13480.doc deleted file mode 100644 index ae5df16367c9..000000000000 --- a/changelog.d/13480.doc +++ /dev/null @@ -1 +0,0 @@ -Note that `libpq` is required on ARM-based Macs. diff --git a/changelog.d/13506.bugfix b/changelog.d/13506.bugfix deleted file mode 100644 index 2e43668865b9..000000000000 --- a/changelog.d/13506.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug introduced in Synapse v1.41.0 where the `/hierarchy` API returned non-standard information (a `room_id` field under each entry in `children_state`). \ No newline at end of file diff --git a/changelog.d/13589.feature b/changelog.d/13589.feature deleted file mode 100644 index 78fa1ddb5202..000000000000 --- a/changelog.d/13589.feature +++ /dev/null @@ -1 +0,0 @@ -Keep track when we attempt to backfill an event but fail so we can intelligently back-off in the future. diff --git a/changelog.d/13667.feature b/changelog.d/13667.feature new file mode 100644 index 000000000000..a0b3cfe18c00 --- /dev/null +++ b/changelog.d/13667.feature @@ -0,0 +1 @@ +Add cache invalidation across workers to module API. diff --git a/changelog.d/13672.feature b/changelog.d/13672.feature deleted file mode 100644 index 2334e6fe1563..000000000000 --- a/changelog.d/13672.feature +++ /dev/null @@ -1 +0,0 @@ -Add admin APIs to fetch messages within a particular window of time. diff --git a/changelog.d/13680.feature b/changelog.d/13680.feature deleted file mode 100644 index 4234c7e0825c..000000000000 --- a/changelog.d/13680.feature +++ /dev/null @@ -1 +0,0 @@ -Cancel the processing of key query requests when they time out. \ No newline at end of file diff --git a/changelog.d/13687.feature b/changelog.d/13687.feature deleted file mode 100644 index dac53ec122c4..000000000000 --- a/changelog.d/13687.feature +++ /dev/null @@ -1 +0,0 @@ -Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/msisdn/requestToken`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidmsisdnrequesttoken) and [`/org.matrix.msc3720/account_status`](https://github.com/matrix-org/matrix-spec-proposals/blob/babolivier/user_status/proposals/3720-account-status.md#post-_matrixclientv1account_status). \ No newline at end of file diff --git a/changelog.d/13703.misc b/changelog.d/13703.misc deleted file mode 100644 index 685a29b17d4b..000000000000 --- a/changelog.d/13703.misc +++ /dev/null @@ -1 +0,0 @@ -Add & populate `event_stream_ordering` column on receipts table for future optimisation of push action processing. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13706.misc b/changelog.d/13706.misc deleted file mode 100644 index 65c854c7a928..000000000000 --- a/changelog.d/13706.misc +++ /dev/null @@ -1 +0,0 @@ -Rename the `EventFormatVersions` enum values so that they line up with room version numbers. \ No newline at end of file diff --git a/changelog.d/13707.misc b/changelog.d/13707.misc deleted file mode 100644 index e72c322d2e28..000000000000 --- a/changelog.d/13707.misc +++ /dev/null @@ -1 +0,0 @@ -Update trial old deps CI to use poetry 1.2.0. diff --git a/changelog.d/13714.misc b/changelog.d/13714.misc deleted file mode 100644 index 07ace50b12a0..000000000000 --- a/changelog.d/13714.misc +++ /dev/null @@ -1 +0,0 @@ -Add experimental configuration option to allow disabling legacy Prometheus metric names. 
\ No newline at end of file diff --git a/changelog.d/13717.misc b/changelog.d/13717.misc deleted file mode 100644 index 07ace50b12a0..000000000000 --- a/changelog.d/13717.misc +++ /dev/null @@ -1 +0,0 @@ -Add experimental configuration option to allow disabling legacy Prometheus metric names. \ No newline at end of file diff --git a/changelog.d/13718.misc b/changelog.d/13718.misc deleted file mode 100644 index 07ace50b12a0..000000000000 --- a/changelog.d/13718.misc +++ /dev/null @@ -1 +0,0 @@ -Add experimental configuration option to allow disabling legacy Prometheus metric names. \ No newline at end of file diff --git a/changelog.d/13722.feature b/changelog.d/13722.feature new file mode 100644 index 000000000000..588d143c0fe4 --- /dev/null +++ b/changelog.d/13722.feature @@ -0,0 +1 @@ +Experimental implementation of MSC3882 to allow an existing device/session to generate a login token for use on a new device/session. diff --git a/changelog.d/13724.misc b/changelog.d/13724.misc deleted file mode 100644 index 2c4f6b19f6f6..000000000000 --- a/changelog.d/13724.misc +++ /dev/null @@ -1 +0,0 @@ -Fix typechecking with latest types-jsonschema. diff --git a/changelog.d/13725.misc b/changelog.d/13725.misc deleted file mode 100644 index e72c322d2e28..000000000000 --- a/changelog.d/13725.misc +++ /dev/null @@ -1 +0,0 @@ -Update trial old deps CI to use poetry 1.2.0. diff --git a/changelog.d/13726.doc b/changelog.d/13726.doc deleted file mode 100644 index ab840e1a92ae..000000000000 --- a/changelog.d/13726.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a mistake in the config manual: the `event_cache_size` _is_ scaled by `caches.global_factor`. The documentation was incorrect since Synapse 1.22. diff --git a/changelog.d/13727.doc b/changelog.d/13727.doc deleted file mode 100644 index ba530b409dd1..000000000000 --- a/changelog.d/13727.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a typo in the documentation for the login ratelimiting configuration. diff --git a/changelog.d/13728.doc b/changelog.d/13728.doc deleted file mode 100644 index 75ca7b7ec36a..000000000000 --- a/changelog.d/13728.doc +++ /dev/null @@ -1 +0,0 @@ -Define Synapse's compatability policy for SQLite versions. diff --git a/changelog.d/13729.misc b/changelog.d/13729.misc deleted file mode 100644 index c6a6f617e337..000000000000 --- a/changelog.d/13729.misc +++ /dev/null @@ -1 +0,0 @@ -Strip number suffix from instance name to consolidate services that traces are spread over. diff --git a/changelog.d/13730.misc b/changelog.d/13730.misc deleted file mode 100644 index 06da6581a4d3..000000000000 --- a/changelog.d/13730.misc +++ /dev/null @@ -1 +0,0 @@ -Instrument `get_metadata_for_events` for understandable traces in Jaeger. diff --git a/changelog.d/13734.misc b/changelog.d/13734.misc deleted file mode 100644 index 2e0dd68a0f94..000000000000 --- a/changelog.d/13734.misc +++ /dev/null @@ -1 +0,0 @@ -Add a stub Rust crate. diff --git a/changelog.d/13735.misc b/changelog.d/13735.misc deleted file mode 100644 index 2e0dd68a0f94..000000000000 --- a/changelog.d/13735.misc +++ /dev/null @@ -1 +0,0 @@ -Add a stub Rust crate. 
diff --git a/changelog.d/13736.feature b/changelog.d/13736.feature deleted file mode 100644 index 60a63c100929..000000000000 --- a/changelog.d/13736.feature +++ /dev/null @@ -1 +0,0 @@ -Improve validation of request bodies for the following client-server API endpoints: [`/account/3pid/add`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidadd), [`/account/3pid/bind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidbind), [`/account/3pid/delete`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3piddelete) and [`/account/3pid/unbind`](https://spec.matrix.org/v1.3/client-server-api/#post_matrixclientv3account3pidunbind). diff --git a/changelog.d/13738.bugfix b/changelog.d/13738.bugfix deleted file mode 100644 index d64fa0b4dec6..000000000000 --- a/changelog.d/13738.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where Synapse fails to start if a signing key file contains an empty line. \ No newline at end of file diff --git a/changelog.d/13741.feature b/changelog.d/13741.feature deleted file mode 100644 index dff46f373fa0..000000000000 --- a/changelog.d/13741.feature +++ /dev/null @@ -1 +0,0 @@ -Document the timestamp when a user accepts the consent, if [consent tracking](https://matrix-org.github.io/synapse/latest/consent_tracking.html) is used. \ No newline at end of file diff --git a/changelog.d/13743.misc b/changelog.d/13743.misc deleted file mode 100644 index 2e0dd68a0f94..000000000000 --- a/changelog.d/13743.misc +++ /dev/null @@ -1 +0,0 @@ -Add a stub Rust crate. diff --git a/changelog.d/13745.misc b/changelog.d/13745.misc deleted file mode 100644 index e97a789c0eb5..000000000000 --- a/changelog.d/13745.misc +++ /dev/null @@ -1 +0,0 @@ -Remove old queries to join room memberships to current state events. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13746.bugfix b/changelog.d/13746.bugfix deleted file mode 100644 index b692af8fd506..000000000000 --- a/changelog.d/13746.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long standing bug where Synapse would fail to handle malformed user IDs or room aliases gracefully in certain cases. diff --git a/changelog.d/13748.misc b/changelog.d/13748.misc deleted file mode 100644 index 2f419bb659a1..000000000000 --- a/changelog.d/13748.misc +++ /dev/null @@ -1 +0,0 @@ -Avoid raising an error due to malformed user IDs in `get_current_hosts_in_room`. Malformed user IDs cannot currently join a room, so this error would not be hit. diff --git a/changelog.d/13749.bugfix b/changelog.d/13749.bugfix deleted file mode 100644 index 8ffafec07b33..000000000000 --- a/changelog.d/13749.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long standing bug where device lists would remain cached when remote users left and rejoined the last room shared with the local homeserver. diff --git a/changelog.d/13750.misc b/changelog.d/13750.misc deleted file mode 100644 index 3bccc21fc5c3..000000000000 --- a/changelog.d/13750.misc +++ /dev/null @@ -1 +0,0 @@ -Update the docstrings for `get_users_in_room` and `get_current_hosts_in_room` to explain the impact of partial state. diff --git a/changelog.d/13752.misc b/changelog.d/13752.misc deleted file mode 100644 index 7624861b9f4e..000000000000 --- a/changelog.d/13752.misc +++ /dev/null @@ -1 +0,0 @@ -User an additional database query when persisting receipts. 
diff --git a/changelog.d/13753.misc b/changelog.d/13753.misc deleted file mode 100644 index 63de2eb9f91e..000000000000 --- a/changelog.d/13753.misc +++ /dev/null @@ -1 +0,0 @@ -Prepatory work for storing thread IDs for notifications and receipts. diff --git a/changelog.d/13754.misc b/changelog.d/13754.misc deleted file mode 100644 index 662ee00e99d5..000000000000 --- a/changelog.d/13754.misc +++ /dev/null @@ -1 +0,0 @@ -Re-type hint some collections as read-only. diff --git a/changelog.d/13756.misc b/changelog.d/13756.misc deleted file mode 100644 index 06e9cd09bf93..000000000000 --- a/changelog.d/13756.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused Prometheus recording rules from `synapse-v2.rules` and add comments describing where the rest are used. \ No newline at end of file diff --git a/changelog.d/13759.misc b/changelog.d/13759.misc deleted file mode 100644 index f91c51248309..000000000000 --- a/changelog.d/13759.misc +++ /dev/null @@ -1 +0,0 @@ -Add a check for editable installs if the Rust library needs rebuilding. diff --git a/changelog.d/13760.removal b/changelog.d/13760.removal deleted file mode 100644 index 624e7c367846..000000000000 --- a/changelog.d/13760.removal +++ /dev/null @@ -1 +0,0 @@ -Synapse will now refuse to start if configured to use SQLite < 3.27. diff --git a/changelog.d/13761.misc b/changelog.d/13761.misc deleted file mode 100644 index f7aa8c459a44..000000000000 --- a/changelog.d/13761.misc +++ /dev/null @@ -1 +0,0 @@ -Tag traces with the instance name to be able to easily jump into the right logs and filter traces by instance. diff --git a/changelog.d/13763.misc b/changelog.d/13763.misc deleted file mode 100644 index 2e0dd68a0f94..000000000000 --- a/changelog.d/13763.misc +++ /dev/null @@ -1 +0,0 @@ -Add a stub Rust crate. diff --git a/changelog.d/13765.misc b/changelog.d/13765.misc deleted file mode 100644 index fdda5cf3b6a3..000000000000 --- a/changelog.d/13765.misc +++ /dev/null @@ -1 +0,0 @@ -Concurrently fetch room push actions when calculating badge counts. Contributed by Nick @ Beeper (@fizzadar). diff --git a/changelog.d/13766.bugfix b/changelog.d/13766.bugfix deleted file mode 100644 index c708e54f9c81..000000000000 --- a/changelog.d/13766.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug where the `cache_invalidation_stream_seq` sequence would begin at 1 instead of 2. diff --git a/changelog.d/13768.misc b/changelog.d/13768.misc new file mode 100644 index 000000000000..28bddb705967 --- /dev/null +++ b/changelog.d/13768.misc @@ -0,0 +1 @@ +Port push rules to using Rust. diff --git a/changelog.d/13769.misc b/changelog.d/13769.misc deleted file mode 100644 index 2e0dd68a0f94..000000000000 --- a/changelog.d/13769.misc +++ /dev/null @@ -1 +0,0 @@ -Add a stub Rust crate. diff --git a/changelog.d/13772.doc b/changelog.d/13772.doc new file mode 100644 index 000000000000..3398ff376555 --- /dev/null +++ b/changelog.d/13772.doc @@ -0,0 +1 @@ +Add `worker_main_http_uri` for the worker generator bash script. diff --git a/changelog.d/13778.misc b/changelog.d/13778.misc deleted file mode 100644 index 2e0dd68a0f94..000000000000 --- a/changelog.d/13778.misc +++ /dev/null @@ -1 +0,0 @@ -Add a stub Rust crate. diff --git a/changelog.d/13780.misc b/changelog.d/13780.misc deleted file mode 100644 index 1bcac51cad46..000000000000 --- a/changelog.d/13780.misc +++ /dev/null @@ -1 +0,0 @@ -Deduplicate `is_server_notices_room`. 
\ No newline at end of file diff --git a/changelog.d/13784.misc b/changelog.d/13784.misc deleted file mode 100644 index e7a542cd809e..000000000000 --- a/changelog.d/13784.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify the dependency DAG in the tests workflow. diff --git a/changelog.d/13788.misc b/changelog.d/13788.misc deleted file mode 100644 index 7263b1ac5245..000000000000 --- a/changelog.d/13788.misc +++ /dev/null @@ -1 +0,0 @@ -Remove an old, incorrect migration file. diff --git a/changelog.d/13789.bugfix b/changelog.d/13789.bugfix deleted file mode 100644 index 9e1e3e0fa7bf..000000000000 --- a/changelog.d/13789.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing spec compliance bug where Synapse would accept a trailing slash on the end of `/get_missing_events` federation requests. \ No newline at end of file diff --git a/changelog.d/13770.misc b/changelog.d/13792.misc similarity index 100% rename from changelog.d/13770.misc rename to changelog.d/13792.misc diff --git a/changelog.d/13795.misc b/changelog.d/13795.misc deleted file mode 100644 index 20d90cc130ee..000000000000 --- a/changelog.d/13795.misc +++ /dev/null @@ -1 +0,0 @@ -Remove unused method in `synapse.api.auth.Auth`. diff --git a/changelog.d/13798.misc b/changelog.d/13798.misc deleted file mode 100644 index e4ec2d77d666..000000000000 --- a/changelog.d/13798.misc +++ /dev/null @@ -1 +0,0 @@ -Fix a memory leak when running the unit tests. \ No newline at end of file diff --git a/changelog.d/13799.feature b/changelog.d/13799.feature new file mode 100644 index 000000000000..6c8e5cffe281 --- /dev/null +++ b/changelog.d/13799.feature @@ -0,0 +1 @@ +Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). diff --git a/changelog.d/13802.misc b/changelog.d/13802.misc deleted file mode 100644 index 0d550713260f..000000000000 --- a/changelog.d/13802.misc +++ /dev/null @@ -1 +0,0 @@ -Use partial indices on SQLite. diff --git a/changelog.d/13809.misc b/changelog.d/13809.misc new file mode 100644 index 000000000000..c2dacca2f25e --- /dev/null +++ b/changelog.d/13809.misc @@ -0,0 +1 @@ +Improve the `synapse.api.auth.Auth` mock used in unit tests. diff --git a/changelog.d/13822.misc b/changelog.d/13822.misc deleted file mode 100644 index dbc77cbcfabe..000000000000 --- a/changelog.d/13822.misc +++ /dev/null @@ -1 +0,0 @@ -Support providing an index predicate clause when doing upserts. diff --git a/changelog.d/13831.feature b/changelog.d/13831.feature new file mode 100644 index 000000000000..6c8e5cffe281 --- /dev/null +++ b/changelog.d/13831.feature @@ -0,0 +1 @@ +Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). diff --git a/changelog.d/13832.feature b/changelog.d/13832.feature new file mode 100644 index 000000000000..1dc1d66efeb5 --- /dev/null +++ b/changelog.d/13832.feature @@ -0,0 +1 @@ +Improve validation for the unspecced, internal-only `_matrix/client/unstable/add_threepid/msisdn/submit_token` endpoint. diff --git a/changelog.d/13836.doc b/changelog.d/13836.doc new file mode 100644 index 000000000000..f2edab00f4ed --- /dev/null +++ b/changelog.d/13836.doc @@ -0,0 +1 @@ +Fix a mistake in sso_mapping_providers.md: `map_user_attributes` is expected to return `display_name` not `displayname`. 
diff --git a/changelog.d/13840.bugfix b/changelog.d/13840.bugfix new file mode 100644 index 000000000000..0f014439a8ed --- /dev/null +++ b/changelog.d/13840.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.53.0 where the experimental implementation of [MSC3715](https://github.com/matrix-org/matrix-spec-proposals/pull/3715) would give incorrect results when paginating forward. diff --git a/changelog.d/13843.removal b/changelog.d/13843.removal new file mode 100644 index 000000000000..f6caaa889566 --- /dev/null +++ b/changelog.d/13843.removal @@ -0,0 +1 @@ +Remove the `complete_sso_login` method from the Module API which was deprecated in Synapse 1.13.0. diff --git a/changelog.d/13850.misc b/changelog.d/13850.misc new file mode 100644 index 000000000000..a973118aaf28 --- /dev/null +++ b/changelog.d/13850.misc @@ -0,0 +1 @@ +Fix the release script not publishing binary wheels. \ No newline at end of file diff --git a/changelog.d/13860.feature b/changelog.d/13860.feature new file mode 100644 index 000000000000..6c8e5cffe281 --- /dev/null +++ b/changelog.d/13860.feature @@ -0,0 +1 @@ +Add experimental support for [MSC3881: Remotely toggle push notifications for another client](https://github.com/matrix-org/matrix-spec-proposals/pull/3881). diff --git a/contrib/workers-bash-scripts/create-multiple-generic-workers.md b/contrib/workers-bash-scripts/create-multiple-generic-workers.md index d30310142946..c9be707b3c6e 100644 --- a/contrib/workers-bash-scripts/create-multiple-generic-workers.md +++ b/contrib/workers-bash-scripts/create-multiple-generic-workers.md @@ -7,7 +7,7 @@ You can alternatively create multiple worker configuration files with a simple ` #!/bin/bash for i in {1..5} do -cat << EOF >> generic_worker$i.yaml +cat << EOF > generic_worker$i.yaml worker_app: synapse.app.generic_worker worker_name: generic_worker$i @@ -15,6 +15,8 @@ worker_name: generic_worker$i worker_replication_host: 127.0.0.1 worker_replication_http_port: 9093 +worker_main_http_uri: http://localhost:8008/ + worker_listeners: - type: http port: 808$i diff --git a/debian/changelog b/debian/changelog index 0b2ad35bc1b0..6325ce29942c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.68.0~rc1) stable; urgency=medium + + * New Synapse release 1.68.0rc1. + + -- Synapse Packaging team Tue, 20 Sep 2022 11:18:20 +0100 + matrix-synapse-py3 (1.67.0) stable; urgency=medium * New Synapse release 1.67.0. diff --git a/docker/Dockerfile b/docker/Dockerfile index a057bf397b18..b20951d4cf62 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -31,7 +31,9 @@ ARG PYTHON_VERSION=3.9 ### ### Stage 0: generate requirements.txt ### -FROM docker.io/python:${PYTHON_VERSION}-slim as requirements +# We hardcode the use of Debian bullseye here because this could change upstream +# and other Dockerfiles used for testing are expecting bullseye. +FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as requirements # RUN --mount is specific to buildkit and is documented at # https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#build-mounts-run---mount. 
@@ -76,7 +78,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ ### ### Stage 1: builder ### -FROM docker.io/python:${PYTHON_VERSION}-slim as builder +FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye as builder # install the OS build deps RUN \ @@ -137,7 +139,7 @@ RUN if [ -z "$TEST_ONLY_IGNORE_POETRY_LOCKFILE" ]; then \ ### Stage 2: runtime ### -FROM docker.io/python:${PYTHON_VERSION}-slim +FROM docker.io/python:${PYTHON_VERSION}-slim-bullseye LABEL org.opencontainers.image.url='https://matrix.org/docs/projects/server/synapse' LABEL org.opencontainers.image.documentation='https://github.com/matrix-org/synapse/blob/master/docker/README.md' diff --git a/docker/complement/Dockerfile b/docker/complement/Dockerfile index 3cfff19f9acd..0e13722d1c00 100644 --- a/docker/complement/Dockerfile +++ b/docker/complement/Dockerfile @@ -17,26 +17,24 @@ ARG SYNAPSE_VERSION=latest # the same debian version as Synapse's docker image (so the versions of the # shared libraries match). -FROM postgres:13-bullseye AS postgres_base - # initialise the database cluster in /var/lib/postgresql - RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password - - # Configure a password and create a database for Synapse - RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single - RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single - # now build the final image, based on the Synapse image. FROM matrixdotorg/synapse-workers:$SYNAPSE_VERSION # copy the postgres installation over from the image we built above RUN adduser --system --uid 999 postgres --home /var/lib/postgresql - COPY --from=postgres_base /var/lib/postgresql /var/lib/postgresql - COPY --from=postgres_base /usr/lib/postgresql /usr/lib/postgresql - COPY --from=postgres_base /usr/share/postgresql /usr/share/postgresql + COPY --from=postgres:13-bullseye /usr/lib/postgresql /usr/lib/postgresql + COPY --from=postgres:13-bullseye /usr/share/postgresql /usr/share/postgresql RUN mkdir /var/run/postgresql && chown postgres /var/run/postgresql ENV PATH="${PATH}:/usr/lib/postgresql/13/bin" ENV PGDATA=/var/lib/postgresql/data + # initialise the database cluster in /var/lib/postgresql + RUN gosu postgres initdb --locale=C --encoding=UTF-8 --auth-host password + + # Configure a password and create a database for Synapse + RUN echo "ALTER USER postgres PASSWORD 'somesecret'" | gosu postgres postgres --single + RUN echo "CREATE DATABASE synapse" | gosu postgres postgres --single + # Extend the shared homeserver config to disable rate-limiting, # set Complement's static shared secret, enable registration, amongst other # tweaks to get Synapse ready for testing. diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 975f05c929a5..3625c7b6c5f5 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -1155,3 +1155,41 @@ GET /_synapse/admin/v1/username_available?username=$localpart The request and response format is the same as the [/_matrix/client/r0/register/available](https://matrix.org/docs/spec/client_server/r0.6.0#get-matrix-client-r0-register-available) API. 
+ +### Find a user based on their ID in an auth provider + +The API is: + +``` +GET /_synapse/admin/v1/auth_providers/$provider/users/$external_id +``` + +When a user matched the given ID for the given provider, an HTTP code `200` with a response body like the following is returned: + +```json +{ + "user_id": "@hello:example.org" +} +``` + +**Parameters** + +The following parameters should be set in the URL: + +- `provider` - The ID of the authentication provider, as advertised by the [`GET /_matrix/client/v3/login`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3login) API in the `m.login.sso` authentication method. +- `external_id` - The user ID from the authentication provider. Usually corresponds to the `sub` claim for OIDC providers, or to the `uid` attestation for SAML2 providers. + +The `external_id` may have characters that are not URL-safe (typically `/`, `:` or `@`), so it is advised to URL-encode those parameters. + +**Errors** + +Returns a `404` HTTP status code if no user was found, with a response body like this: + +```json +{ + "errcode":"M_NOT_FOUND", + "error":"User not found" +} +``` + +_Added in Synapse 1.68.0._ diff --git a/docs/development/dependencies.md b/docs/development/dependencies.md index b356870f2795..8474525480d6 100644 --- a/docs/development/dependencies.md +++ b/docs/development/dependencies.md @@ -126,6 +126,23 @@ context of poetry's venv, without having to run `poetry shell` beforehand. poetry install --extras all --remove-untracked ``` +## ...delete everything and start over from scratch? + +```shell +# Stop the current virtualenv if active +$ deactivate + +# Remove all of the files from the current environment. +# Don't worry, even though it says "all", this will only +# remove the Poetry virtualenvs for the current project. +$ poetry env remove --all + +# Reactivate Poetry shell to create the virtualenv again +$ poetry shell +# Install everything again +$ poetry install --extras all +``` + ## ...run a command in the `poetry` virtualenv? Use `poetry run cmd args` when you need the python virtualenv context. @@ -256,6 +273,16 @@ from PyPI. (This is what makes poetry seem slow when doing the first `poetry install`.) Try `poetry cache list` and `poetry cache clear --all ` to see if that fixes things. +## Remove outdated egg-info + +Delete the `matrix_synapse.egg-info/` directory from the root of your Synapse +install. + +This stores some cached information about dependencies and often conflicts with +letting Poetry do the right thing. + + + ## Try `--verbose` or `--dry-run` arguments. Sometimes useful to see what poetry's internal logic is. diff --git a/docs/reverse_proxy.md b/docs/reverse_proxy.md index d1618e815526..4e7a1d443533 100644 --- a/docs/reverse_proxy.md +++ b/docs/reverse_proxy.md @@ -45,6 +45,10 @@ listens to traffic on localhost. (Do not change `bind_addresses` to `127.0.0.1` when using a containerized Synapse, as that will prevent it from responding to proxied traffic.) +Optionally, you can also set +[`request_id_header`](../usage/configuration/config_documentation.md#listeners) +so that the server extracts and re-uses the same request ID format that the +reverse proxy is using. 
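For illustration only, a listener that picks up a request ID injected by the proxy might look like the following sketch; the header name `X-Request-Id` and port `8008` are assumptions for the example, not values taken from this changeset.

```yaml
# Hypothetical homeserver.yaml fragment: re-use the request ID header that the
# reverse proxy injects, so Synapse log lines can be correlated with proxy logs.
listeners:
  - port: 8008
    type: http
    x_forwarded: true
    request_id_header: "X-Request-Id"   # assumed header name set by the proxy
    resources:
      - names: [client, federation]
```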
## Reverse-proxy configuration examples diff --git a/docs/sso_mapping_providers.md b/docs/sso_mapping_providers.md index 817499149f90..9f5e5fbbe152 100644 --- a/docs/sso_mapping_providers.md +++ b/docs/sso_mapping_providers.md @@ -73,8 +73,8 @@ A custom mapping provider must specify the following methods: * `async def map_user_attributes(self, userinfo, token, failures)` - This method must be async. - Arguments: - - `userinfo` - A `authlib.oidc.core.claims.UserInfo` object to extract user - information from. + - `userinfo` - An [`authlib.oidc.core.claims.UserInfo`](https://docs.authlib.org/en/latest/specs/oidc.html#authlib.oidc.core.UserInfo) + object to extract user information from. - `token` - A dictionary which includes information necessary to make further requests to the OpenID provider. - `failures` - An `int` that represents the amount of times the returned @@ -91,7 +91,13 @@ A custom mapping provider must specify the following methods: `None`, the user is prompted to pick their own username. This is only used during a user's first login. Once a localpart has been associated with a remote user ID (see `get_remote_user_id`) it cannot be updated. - - `displayname`: An optional string, the display name for the user. + - `confirm_localpart`: A boolean. If set to `True`, when a `localpart` + string is returned from this method, Synapse will prompt the user to + either accept this localpart or pick their own username. Otherwise this + option has no effect. If omitted, defaults to `False`. + - `display_name`: An optional string, the display name for the user. + - `emails`: A list of strings, the email address(es) to associate with + this user. If omitted, defaults to an empty list. * `async def get_extra_attributes(self, userinfo, token)` - This method must be async. - Arguments: diff --git a/docs/upgrade.md b/docs/upgrade.md index 9f165551fa6d..7d4c2392e1f1 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -89,6 +89,13 @@ process, for example: dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb ``` +# Upgrading to v1.68.0 + +As announced in the upgrade notes for v1.67.0, Synapse now requires a SQLite +version of 3.27.0 or higher if SQLite is in use and source checkouts of Synapse +now require a recent Rust compiler. + + # Upgrading to v1.67.0 ## Direct TCP replication is no longer supported: migrate to Redis @@ -125,7 +132,7 @@ From the next major release (v1.68.0) Synapse will require SQLite 3.27.0 or higher. Synapse v1.67.0 will be the last major release supporting SQLite versions 3.22 to 3.26. -Those using docker images or Debian packages from Matrix.org will not be +Those using Docker images or Debian packages from Matrix.org will not be affected. If you have installed from source, you should check the version of SQLite used by Python with: @@ -135,6 +142,7 @@ python -c "import sqlite3; print(sqlite3.sqlite_version)" If this is too old, refer to your distribution for advice on upgrading. 
+ # Upgrading to v1.66.0 ## Delegation of email validation no longer supported diff --git a/docs/usage/administration/request_log.md b/docs/usage/administration/request_log.md index adb5f4f5f353..82f5ac7b96a5 100644 --- a/docs/usage/administration/request_log.md +++ b/docs/usage/administration/request_log.md @@ -12,14 +12,14 @@ See the following for how to decode the dense data available from the default lo | Part | Explanation | | ----- | ------------ | -| AAAA | Timestamp request was logged (not recieved) | +| AAAA | Timestamp request was logged (not received) | | BBBB | Logger name (`synapse.access.(http\|https).`, where 'tag' is defined in the `listeners` config section, normally the port) | | CCCC | Line number in code | | DDDD | Log Level | | EEEE | Request Identifier (This identifier is shared by related log lines)| | FFFF | Source IP (Or X-Forwarded-For if enabled) | | GGGG | Server Port | -| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied) | +| HHHH | Federated Server or Local User making request (blank if unauthenticated or not supplied).
If this is of the form `@aaa:example.com|@bbb:example.com`, then that means that `@aaa:example.com` is authenticated but they are controlling `@bbb:example.com`, e.g. if `aaa` is controlling `bbb` [via the admin API](https://matrix-org.github.io/synapse/latest/admin_api/user_admin_api.html#login-as-a-user). | | IIII | Total Time to process the request | | JJJJ | Time to send response over network once generated (this may be negative if the socket is closed before the response is generated)| | KKKK | Userland CPU time | diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index cd546041b2d4..69d305b62e6d 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -434,7 +434,16 @@ Sub-options for each listener include: * `tls`: set to true to enable TLS for this listener. Will use the TLS key/cert specified in tls_private_key_path / tls_certificate_path. * `x_forwarded`: Only valid for an 'http' listener. Set to true to use the X-Forwarded-For header as the client IP. Useful when Synapse is - behind a reverse-proxy. + behind a [reverse-proxy](../../reverse_proxy.md). + +* `request_id_header`: The header extracted from each incoming request that is + used as the basis for the request ID. The request ID is used in + [logs](../administration/request_log.md#request-log-format) and tracing to + correlate and match up requests. When unset, Synapse will automatically + generate sequential request IDs. This option is useful when Synapse is behind + a [reverse-proxy](../../reverse_proxy.md). + + _Added in Synapse 1.68.0._ * `resources`: Only valid for an 'http' listener. A list of resources to host on this port. Sub-options for each resource are: diff --git a/pyproject.toml b/pyproject.toml index 8e50dd28528a..43f165b8d052 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -57,7 +57,7 @@ manifest-path = "rust/Cargo.toml" [tool.poetry] name = "matrix-synapse" -version = "1.67.0" +version = "1.68.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "Apache-2.0" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index deddf3cec262..8dc5f93ff111 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -18,7 +18,15 @@ crate-type = ["cdylib"] name = "synapse.synapse_rust" [dependencies] -pyo3 = { version = "0.16.5", features = ["extension-module", "macros", "abi3", "abi3-py37"] } +anyhow = "1.0.63" +lazy_static = "1.4.0" +log = "0.4.17" +pyo3 = { version = "0.17.1", features = ["extension-module", "macros", "anyhow", "abi3", "abi3-py37"] } +pyo3-log = "0.7.0" +pythonize = "0.17.0" +regex = "1.6.0" +serde = { version = "1.0.144", features = ["derive"] } +serde_json = "1.0.85" [build-dependencies] blake2 = "0.10.4" diff --git a/rust/src/lib.rs b/rust/src/lib.rs index ba42465fb80b..c7b60e58a73b 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -1,5 +1,7 @@ use pyo3::prelude::*; +pub mod push; + /// Returns the hash of all the rust source files at the time it was compiled. /// /// Used by python to detect if the rust library is outdated. @@ -17,8 +19,13 @@ fn sum_as_string(a: usize, b: usize) -> PyResult { /// The entry point for defining the Python module. 
#[pymodule] -fn synapse_rust(_py: Python<'_>, m: &PyModule) -> PyResult<()> { +fn synapse_rust(py: Python<'_>, m: &PyModule) -> PyResult<()> { + pyo3_log::init(); + m.add_function(wrap_pyfunction!(sum_as_string, m)?)?; m.add_function(wrap_pyfunction!(get_rust_file_digest, m)?)?; + + push::register_module(py, m)?; + Ok(()) } diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs new file mode 100644 index 000000000000..7c62bc48490f --- /dev/null +++ b/rust/src/push/base_rules.rs @@ -0,0 +1,335 @@ +// Copyright 2022 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Contains the definitions of the "base" push rules. + +use std::borrow::Cow; +use std::collections::HashMap; + +use lazy_static::lazy_static; +use serde_json::Value; + +use super::KnownCondition; +use crate::push::Action; +use crate::push::Condition; +use crate::push::EventMatchCondition; +use crate::push::PushRule; +use crate::push::SetTweak; +use crate::push::TweakValue; + +const HIGHLIGHT_ACTION: Action = Action::SetTweak(SetTweak { + set_tweak: Cow::Borrowed("highlight"), + value: None, + other_keys: Value::Null, +}); + +const HIGHLIGHT_FALSE_ACTION: Action = Action::SetTweak(SetTweak { + set_tweak: Cow::Borrowed("highlight"), + value: Some(TweakValue::Other(Value::Bool(false))), + other_keys: Value::Null, +}); + +const SOUND_ACTION: Action = Action::SetTweak(SetTweak { + set_tweak: Cow::Borrowed("sound"), + value: Some(TweakValue::String(Cow::Borrowed("default"))), + other_keys: Value::Null, +}); + +const RING_ACTION: Action = Action::SetTweak(SetTweak { + set_tweak: Cow::Borrowed("sound"), + value: Some(TweakValue::String(Cow::Borrowed("ring"))), + other_keys: Value::Null, +}); + +pub const BASE_PREPEND_OVERRIDE_RULES: &[PushRule] = &[PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.master"), + priority_class: 5, + conditions: Cow::Borrowed(&[]), + actions: Cow::Borrowed(&[Action::DontNotify]), + default: true, + default_enabled: false, +}]; + +pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ + PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("content.msgtype"), + pattern: Some(Cow::Borrowed("m.notice")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::DontNotify]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.invite_for_me"), + priority_class: 5, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.member")), + pattern_type: None, + })), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("content.membership"), + pattern: Some(Cow::Borrowed("invite")), + pattern_type: None, + })), + 
Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("state_key"), + pattern: None, + pattern_type: Some(Cow::Borrowed("user_id")), + })), + ]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION, SOUND_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.member_event"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.member")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::DontNotify]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.contains_display_name"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::ContainsDisplayName)]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.roomnotif"), + priority_class: 5, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::SenderNotificationPermission { + key: Cow::Borrowed("room"), + }), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("content.body"), + pattern: Some(Cow::Borrowed("@room")), + pattern_type: None, + })), + ]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.tombstone"), + priority_class: 5, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.tombstone")), + pattern_type: None, + })), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("state_key"), + pattern: Some(Cow::Borrowed("")), + pattern_type: None, + })), + ]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/override/.m.rule.reaction"), + priority_class: 5, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.reaction")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::DontNotify]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/override/.org.matrix.msc3786.rule.room.server_acl"), + priority_class: 5, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.server_acl")), + pattern_type: None, + })), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("state_key"), + pattern: Some(Cow::Borrowed("")), + pattern_type: None, + })), + ]), + actions: Cow::Borrowed(&[]), + default: true, + default_enabled: true, + }, +]; + +pub const BASE_APPEND_CONTENT_RULES: &[PushRule] = &[PushRule { + rule_id: Cow::Borrowed("global/content/.m.rule.contains_user_name"), + priority_class: 4, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("content.body"), + pattern: None, + pattern_type: Some(Cow::Borrowed("user_localpart")), + }, + ))]), + actions: 
Cow::Borrowed(&[Action::Notify, HIGHLIGHT_ACTION, SOUND_ACTION]), + default: true, + default_enabled: true, +}]; + +pub const BASE_APPEND_UNDERRIDE_RULES: &[PushRule] = &[ + PushRule { + rule_id: Cow::Borrowed("global/underride/.m.rule.call"), + priority_class: 1, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.call.invite")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::Notify, RING_ACTION, HIGHLIGHT_FALSE_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.m.rule.room_one_to_one"), + priority_class: 1, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.message")), + pattern_type: None, + })), + Condition::Known(KnownCondition::RoomMemberCount { + is: Some(Cow::Borrowed("2")), + }), + ]), + actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted_room_one_to_one"), + priority_class: 1, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.encrypted")), + pattern_type: None, + })), + Condition::Known(KnownCondition::RoomMemberCount { + is: Some(Cow::Borrowed("2")), + }), + ]), + actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.org.matrix.msc3772.thread_reply"), + priority_class: 1, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::RelationMatch { + rel_type: Cow::Borrowed("m.thread"), + sender: None, + sender_type: Some(Cow::Borrowed("user_id")), + })]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.m.rule.message"), + priority_class: 1, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.message")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.m.rule.encrypted"), + priority_class: 1, + conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( + EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("m.room.encrypted")), + pattern_type: None, + }, + ))]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), + default: true, + default_enabled: true, + }, + PushRule { + rule_id: Cow::Borrowed("global/underride/.im.vector.jitsi"), + priority_class: 1, + conditions: Cow::Borrowed(&[ + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("type"), + pattern: Some(Cow::Borrowed("im.vector.modular.widgets")), + pattern_type: None, + })), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: Cow::Borrowed("content.type"), + pattern: Some(Cow::Borrowed("jitsi")), + pattern_type: None, + })), + Condition::Known(KnownCondition::EventMatch(EventMatchCondition { + key: 
Cow::Borrowed("state_key"), + pattern: Some(Cow::Borrowed("*")), + pattern_type: None, + })), + ]), + actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), + default: true, + default_enabled: true, + }, +]; + +lazy_static! { + pub static ref BASE_RULES_BY_ID: HashMap<&'static str, &'static PushRule> = + BASE_PREPEND_OVERRIDE_RULES + .iter() + .chain(BASE_APPEND_OVERRIDE_RULES.iter()) + .chain(BASE_APPEND_CONTENT_RULES.iter()) + .chain(BASE_APPEND_UNDERRIDE_RULES.iter()) + .map(|rule| { (&*rule.rule_id, rule) }) + .collect(); +} diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs new file mode 100644 index 000000000000..de6764e7c5c7 --- /dev/null +++ b/rust/src/push/mod.rs @@ -0,0 +1,502 @@ +// Copyright 2022 The Matrix.org Foundation C.I.C. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! An implementation of Matrix push rules. +//! +//! The `Cow<_>` type is used extensively within this module to allow creating +//! the base rules as constants (in Rust constants can't require explicit +//! allocation atm). +//! +//! --- +//! +//! Push rules is the system used to determine which events trigger a push (and a +//! bump in notification counts). +//! +//! This consists of a list of "push rules" for each user, where a push rule is a +//! pair of "conditions" and "actions". When a user receives an event Synapse +//! iterates over the list of push rules until it finds one where all the conditions +//! match the event, at which point "actions" describe the outcome (e.g. notify, +//! highlight, etc). +//! +//! Push rules are split up into 5 different "kinds" (aka "priority classes"), which +//! are run in order: +//! 1. Override — highest priority rules, e.g. always ignore notices +//! 2. Content — content specific rules, e.g. @ notifications +//! 3. Room — per room rules, e.g. enable/disable notifications for all messages +//! in a room +//! 4. Sender — per sender rules, e.g. never notify for messages from a given +//! user +//! 5. Underride — the lowest priority "default" rules, e.g. notify for every +//! message. +//! +//! The set of "base rules" are the list of rules that every user has by default. A +//! user can modify their copy of the push rules in one of three ways: +//! +//! 1. Adding a new push rule of a certain kind +//! 2. Changing the actions of a base rule +//! 3. Enabling/disabling a base rule. +//! +//! The base rules are split into whether they come before or after a particular +//! kind, so the order of push rule evaluation would be: base rules for before +//! "override" kind, user defined "override" rules, base rules after "override" +//! kind, etc, etc. + +use std::borrow::Cow; +use std::collections::{BTreeMap, HashMap, HashSet}; + +use anyhow::{Context, Error}; +use log::warn; +use pyo3::prelude::*; +use pythonize::pythonize; +use serde::de::Error as _; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +mod base_rules; + +/// Called when registering modules with python. 
+pub fn register_module(py: Python<'_>, m: &PyModule) -> PyResult<()> {
+    let child_module = PyModule::new(py, "push")?;
+    child_module.add_class::<PushRule>()?;
+    child_module.add_class::<PushRules>()?;
+    child_module.add_class::<FilteredPushRules>()?;
+    child_module.add_function(wrap_pyfunction!(get_base_rule_ids, m)?)?;
+
+    m.add_submodule(child_module)?;
+
+    // We need to manually add the module to sys.modules to make `from
+    // synapse.synapse_rust import push` work.
+    py.import("sys")?
+        .getattr("modules")?
+        .set_item("synapse.synapse_rust.push", child_module)?;
+
+    Ok(())
+}
+
+#[pyfunction]
+fn get_base_rule_ids() -> HashSet<&'static str> {
+    base_rules::BASE_RULES_BY_ID.keys().copied().collect()
+}
+
+/// A single push rule for a user.
+#[derive(Debug, Clone)]
+#[pyclass(frozen)]
+pub struct PushRule {
+    /// A unique ID for this rule
+    pub rule_id: Cow<'static, str>,
+    /// The "kind" of push rule this is (see `PRIORITY_CLASS_MAP` in Python)
+    #[pyo3(get)]
+    pub priority_class: i32,
+    /// The conditions that must all match for actions to be applied
+    pub conditions: Cow<'static, [Condition]>,
+    /// The actions to apply if all conditions are met
+    pub actions: Cow<'static, [Action]>,
+    /// Whether this is a base rule
+    #[pyo3(get)]
+    pub default: bool,
+    /// Whether this is enabled by default
+    #[pyo3(get)]
+    pub default_enabled: bool,
+}
+
+#[pymethods]
+impl PushRule {
+    #[staticmethod]
+    pub fn from_db(
+        rule_id: String,
+        priority_class: i32,
+        conditions: &str,
+        actions: &str,
+    ) -> Result<PushRule, Error> {
+        let conditions = serde_json::from_str(conditions).context("parsing conditions")?;
+        let actions = serde_json::from_str(actions).context("parsing actions")?;
+
+        Ok(PushRule {
+            rule_id: Cow::Owned(rule_id),
+            priority_class,
+            conditions,
+            actions,
+            default: false,
+            default_enabled: true,
+        })
+    }
+
+    #[getter]
+    fn rule_id(&self) -> &str {
+        &self.rule_id
+    }
+
+    #[getter]
+    fn actions(&self) -> Vec<Action> {
+        self.actions.clone().into_owned()
+    }
+
+    #[getter]
+    fn conditions(&self) -> Vec<Condition> {
+        self.conditions.clone().into_owned()
+    }
+
+    fn __repr__(&self) -> String {
+        format!(
+            "<PushRule rule_id={}, conditions={:?}, actions={:?}>",
+            self.rule_id, self.conditions, self.actions
+        )
+    }
+}
+
+/// The "action" Synapse should perform for a matching push rule.
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Action {
+    DontNotify,
+    Notify,
+    Coalesce,
+    SetTweak(SetTweak),
+
+    // An unrecognized custom action.
+    Unknown(Value),
+}
+
+impl IntoPy<PyObject> for Action {
+    fn into_py(self, py: Python<'_>) -> PyObject {
+        // When we pass the `Action` struct to Python we want it to be converted
+        // to a dict. We use `pythonize`, which converts the struct using the
+        // `serde` serialization.
+        pythonize(py, &self).expect("valid action")
+    }
+}
+
+/// The body of a `SetTweak` push action.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+pub struct SetTweak {
+    set_tweak: Cow<'static, str>,
+
+    #[serde(skip_serializing_if = "Option::is_none")]
+    value: Option<TweakValue>,
+
+    // This picks up any other fields that may have been added by clients.
+    // These get added when we convert the `Action` to a python object.
+    #[serde(flatten)]
+    other_keys: Value,
+}
+
+/// The value of a `set_tweak`.
+///
+/// We need this (rather than using `TweakValue` directly) so that we can use
+/// `&'static str` in the value when defining the constant base rules.
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
+#[serde(untagged)]
+pub enum TweakValue {
+    String(Cow<'static, str>),
+    Other(Value),
+}
+
+impl Serialize for Action {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match self {
+            Action::DontNotify => serializer.serialize_str("dont_notify"),
+            Action::Notify => serializer.serialize_str("notify"),
+            Action::Coalesce => serializer.serialize_str("coalesce"),
+            Action::SetTweak(tweak) => tweak.serialize(serializer),
+            Action::Unknown(value) => value.serialize(serializer),
+        }
+    }
+}
+
+/// Simple helper class for deserializing Action from JSON.
+#[derive(Deserialize)]
+#[serde(untagged)]
+enum ActionDeserializeHelper {
+    Str(String),
+    SetTweak(SetTweak),
+    Unknown(Value),
+}
+
+impl<'de> Deserialize<'de> for Action {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: serde::Deserializer<'de>,
+    {
+        let helper: ActionDeserializeHelper = Deserialize::deserialize(deserializer)?;
+        match helper {
+            ActionDeserializeHelper::Str(s) => match &*s {
+                "dont_notify" => Ok(Action::DontNotify),
+                "notify" => Ok(Action::Notify),
+                "coalesce" => Ok(Action::Coalesce),
+                _ => Err(D::Error::custom("unrecognized action")),
+            },
+            ActionDeserializeHelper::SetTweak(set_tweak) => Ok(Action::SetTweak(set_tweak)),
+            ActionDeserializeHelper::Unknown(value) => Ok(Action::Unknown(value)),
+        }
+    }
+}
+
+/// A condition used in push rules to match against an event.
+///
+/// We need this split as `serde` doesn't give us the ability to have a
+/// "catchall" variant in tagged enums.
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(untagged)]
+pub enum Condition {
+    /// A recognized condition that we can match against
+    Known(KnownCondition),
+    /// An unrecognized condition that we ignore.
+    Unknown(Value),
+}
+
+/// The set of "known" conditions that we can handle.
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "snake_case")]
+#[serde(tag = "kind")]
+pub enum KnownCondition {
+    EventMatch(EventMatchCondition),
+    ContainsDisplayName,
+    RoomMemberCount {
+        #[serde(skip_serializing_if = "Option::is_none")]
+        is: Option<Cow<'static, str>>,
+    },
+    SenderNotificationPermission {
+        key: Cow<'static, str>,
+    },
+    #[serde(rename = "org.matrix.msc3772.relation_match")]
+    RelationMatch {
+        rel_type: Cow<'static, str>,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        sender: Option<Cow<'static, str>>,
+        #[serde(skip_serializing_if = "Option::is_none")]
+        sender_type: Option<Cow<'static, str>>,
+    },
+}
+
+impl IntoPy<PyObject> for Condition {
+    fn into_py(self, py: Python<'_>) -> PyObject {
+        pythonize(py, &self).expect("valid condition")
+    }
+}
+
+/// The body of a [`Condition::EventMatch`]
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct EventMatchCondition {
+    key: Cow<'static, str>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pattern: Option<Cow<'static, str>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pattern_type: Option<Cow<'static, str>>,
+}
+
+/// The collection of push rules for a user.
+#[derive(Debug, Clone, Default)]
+#[pyclass(frozen)]
+struct PushRules {
+    /// Custom push rules that override a base rule.
+    overridden_base_rules: HashMap<Cow<'static, str>, PushRule>,
+
+    /// Custom rules that come between the prepend/append override base rules.
+    override_rules: Vec<PushRule>,
+    /// Custom rules that come before the base content rules.
+    content: Vec<PushRule>,
+    /// Custom rules that come before the base room rules.
+    room: Vec<PushRule>,
+    /// Custom rules that come before the base sender rules.
+    sender: Vec<PushRule>,
+    /// Custom rules that come before the base underride rules.
+    underride: Vec<PushRule>,
+}
+
+#[pymethods]
+impl PushRules {
+    #[new]
+    fn new(rules: Vec<PushRule>) -> PushRules {
+        let mut push_rules: PushRules = Default::default();
+
+        for rule in rules {
+            if let Some(&o) = base_rules::BASE_RULES_BY_ID.get(&*rule.rule_id) {
+                push_rules.overridden_base_rules.insert(
+                    rule.rule_id.clone(),
+                    PushRule {
+                        actions: rule.actions.clone(),
+                        ..o.clone()
+                    },
+                );
+
+                continue;
+            }
+
+            match rule.priority_class {
+                5 => push_rules.override_rules.push(rule),
+                4 => push_rules.content.push(rule),
+                3 => push_rules.room.push(rule),
+                2 => push_rules.sender.push(rule),
+                1 => push_rules.underride.push(rule),
+                _ => {
+                    warn!(
+                        "Unrecognized priority class for rule {}: {}",
+                        rule.rule_id, rule.priority_class
+                    );
+                }
+            }
+        }
+
+        push_rules
+    }
+
+    /// Returns the list of all rules, including base rules, in the order they
+    /// should be executed in.
+    fn rules(&self) -> Vec<PushRule> {
+        self.iter().cloned().collect()
+    }
+}
+
+impl PushRules {
+    /// Iterates over all the rules, including base rules, in the order they
+    /// should be executed in.
+    pub fn iter(&self) -> impl Iterator<Item = &PushRule> {
+        base_rules::BASE_PREPEND_OVERRIDE_RULES
+            .iter()
+            .chain(self.override_rules.iter())
+            .chain(base_rules::BASE_APPEND_OVERRIDE_RULES.iter())
+            .chain(self.content.iter())
+            .chain(base_rules::BASE_APPEND_CONTENT_RULES.iter())
+            .chain(self.room.iter())
+            .chain(self.sender.iter())
+            .chain(self.underride.iter())
+            .chain(base_rules::BASE_APPEND_UNDERRIDE_RULES.iter())
+            .map(|rule| {
+                self.overridden_base_rules
+                    .get(&*rule.rule_id)
+                    .unwrap_or(rule)
+            })
+    }
+}
+
+/// A wrapper around `PushRules` that checks the enabled state of rules and
+/// filters out disabled experimental rules.
+#[derive(Debug, Clone, Default)]
+#[pyclass(frozen)]
+pub struct FilteredPushRules {
+    push_rules: PushRules,
+    enabled_map: BTreeMap<String, bool>,
+    msc3786_enabled: bool,
+    msc3772_enabled: bool,
+}
+
+#[pymethods]
+impl FilteredPushRules {
+    #[new]
+    fn py_new(
+        push_rules: PushRules,
+        enabled_map: BTreeMap<String, bool>,
+        msc3786_enabled: bool,
+        msc3772_enabled: bool,
+    ) -> Self {
+        Self {
+            push_rules,
+            enabled_map,
+            msc3786_enabled,
+            msc3772_enabled,
+        }
+    }
+
+    /// Returns the list of all rules and their enabled state, including base
+    /// rules, in the order they should be executed in.
+    fn rules(&self) -> Vec<(PushRule, bool)> {
+        self.iter().map(|(r, e)| (r.clone(), e)).collect()
+    }
+}
+
+impl FilteredPushRules {
+    /// Iterates over all the rules and their enabled state, including base
+    /// rules, in the order they should be executed in.
+    fn iter(&self) -> impl Iterator<Item = (&PushRule, bool)> {
+        self.push_rules
+            .iter()
+            .filter(|rule| {
+                // Ignore disabled experimental push rules
+                if !self.msc3786_enabled
+                    && rule.rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl"
+                {
+                    return false;
+                }
+
+                if !self.msc3772_enabled
+                    && rule.rule_id == "global/underride/.org.matrix.msc3772.thread_reply"
+                {
+                    return false;
+                }
+
+                true
+            })
+            .map(|r| {
+                let enabled = *self
+                    .enabled_map
+                    .get(&*r.rule_id)
+                    .unwrap_or(&r.default_enabled);
+                (r, enabled)
+            })
+    }
+}
+
+#[test]
+fn test_serialize_condition() {
+    let condition = Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
+        key: "content.body".into(),
+        pattern: Some("coffee".into()),
+        pattern_type: None,
+    }));
+
+    let json = serde_json::to_string(&condition).unwrap();
+    assert_eq!(
+        json,
+        r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#
+    )
+}
+
+#[test]
+fn test_deserialize_condition() {
+    let json = r#"{"kind":"event_match","key":"content.body","pattern":"coffee"}"#;
+
+    let _: Condition = serde_json::from_str(json).unwrap();
+}
+
+#[test]
+fn test_deserialize_custom_condition() {
+    let json = r#"{"kind":"custom_tag"}"#;
+
+    let condition: Condition = serde_json::from_str(json).unwrap();
+    assert!(matches!(condition, Condition::Unknown(_)));
+
+    let new_json = serde_json::to_string(&condition).unwrap();
+    assert_eq!(json, new_json);
+}
+
+#[test]
+fn test_deserialize_action() {
+    let _: Action = serde_json::from_str(r#""notify""#).unwrap();
+    let _: Action = serde_json::from_str(r#""dont_notify""#).unwrap();
+    let _: Action = serde_json::from_str(r#""coalesce""#).unwrap();
+    let _: Action = serde_json::from_str(r#"{"set_tweak": "highlight"}"#).unwrap();
+}
+
+#[test]
+fn test_custom_action() {
+    let json = r#"{"some_custom":"action_fields"}"#;
+
+    let action: Action = serde_json::from_str(json).unwrap();
+    assert!(matches!(action, Action::Unknown(_)));
+
+    let new_json = serde_json::to_string(&action).unwrap();
+    assert_eq!(json, new_json);
+}
diff --git a/scripts-dev/make_full_schema.sh b/scripts-dev/make_full_schema.sh
index 61394360cecb..d8cd06ee4f45 100755
--- a/scripts-dev/make_full_schema.sh
+++ b/scripts-dev/make_full_schema.sh
@@ -2,23 +2,16 @@
 #
 # This script generates SQL files for creating a brand new Synapse DB with the latest
 # schema, on both SQLite3 and Postgres.
-#
-# It does so by having Synapse generate an up-to-date SQLite DB, then running
-# synapse_port_db to convert it to Postgres. It then dumps the contents of both.
 
 export PGHOST="localhost"
-POSTGRES_DB_NAME="synapse_full_schema.$$"
-
-SQLITE_SCHEMA_FILE="schema.sql.sqlite"
-SQLITE_ROWS_FILE="rows.sql.sqlite"
-POSTGRES_SCHEMA_FILE="full.sql.postgres"
-POSTGRES_ROWS_FILE="rows.sql.postgres"
-
+POSTGRES_MAIN_DB_NAME="synapse_full_schema_main.$$"
+POSTGRES_COMMON_DB_NAME="synapse_full_schema_common.$$"
+POSTGRES_STATE_DB_NAME="synapse_full_schema_state.$$"
 REQUIRED_DEPS=("matrix-synapse" "psycopg2")
 
 usage() {
   echo
-  echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n] [-h]"
+  echo "Usage: $0 -p <postgres_username> -o <path> [-c] [-n <schema number>] [-h]"
   echo
   echo "-p <postgres_username>"
   echo "   Username to connect to local postgres instance. The password will be requested"
@@ -27,11 +20,16 @@ usage() {
   echo "   CI mode. Prints every command that the script runs."
   echo "-o <path>"
   echo "   Directory to output full schema files to."
+  echo "-n <schema number>"
+  echo "   Schema number for the new snapshot. Used to set the location of files within "
+  echo "   the output directory, mimicking that of synapse/storage/schemas."
+  echo "   Defaults to 9999."
echo "-h" echo " Display this help text." } -while getopts "p:co:h" opt; do +SCHEMA_NUMBER="9999" +while getopts "p:co:hn:" opt; do case $opt in p) export PGUSER=$OPTARG @@ -48,6 +46,9 @@ while getopts "p:co:h" opt; do usage exit ;; + n) + SCHEMA_NUMBER="$OPTARG" + ;; \?) echo "ERROR: Invalid option: -$OPTARG" >&2 usage @@ -95,12 +96,21 @@ cd "$(dirname "$0")/.." TMPDIR=$(mktemp -d) KEY_FILE=$TMPDIR/test.signing.key # default Synapse signing key path SQLITE_CONFIG=$TMPDIR/sqlite.conf -SQLITE_DB=$TMPDIR/homeserver.db +SQLITE_MAIN_DB=$TMPDIR/main.db +SQLITE_STATE_DB=$TMPDIR/state.db +SQLITE_COMMON_DB=$TMPDIR/common.db POSTGRES_CONFIG=$TMPDIR/postgres.conf # Ensure these files are delete on script exit -# TODO: the trap should also drop the temp postgres DB -trap 'rm -rf $TMPDIR' EXIT +cleanup() { + echo "Cleaning up temporary sqlite database and config files..." + rm -r "$TMPDIR" + echo "Cleaning up temporary Postgres database..." + dropdb --if-exists "$POSTGRES_COMMON_DB_NAME" + dropdb --if-exists "$POSTGRES_MAIN_DB_NAME" + dropdb --if-exists "$POSTGRES_STATE_DB_NAME" +} +trap 'cleanup' EXIT cat > "$SQLITE_CONFIG" < "$OUTPUT_DIR/$SQLITE_SCHEMA_FILE" -sqlite3 "$SQLITE_DB" ".dump --data-only --nosys" > "$OUTPUT_DIR/$SQLITE_ROWS_FILE" +echo "Dumping SQLite3 schema..." + +mkdir -p "$OUTPUT_DIR/"{common,main,state}"/full_schema/$SCHEMA_NUMBER" +sqlite3 "$SQLITE_COMMON_DB" ".schema --indent" > "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.sqlite" +sqlite3 "$SQLITE_COMMON_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.sqlite" +sqlite3 "$SQLITE_MAIN_DB" ".schema --indent" > "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.sqlite" +sqlite3 "$SQLITE_MAIN_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.sqlite" +sqlite3 "$SQLITE_STATE_DB" ".schema --indent" > "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.sqlite" +sqlite3 "$SQLITE_STATE_DB" ".dump --data-only --nosys" >> "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.sqlite" + +cleanup_pg_schema() { + sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' +} -echo "Dumping Postgres schema to '$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE' and '$OUTPUT_DIR/$POSTGRES_ROWS_FILE'..." -pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_SCHEMA_FILE" -pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_DB_NAME" | sed -e '/^$/d' -e '/^--/d' -e 's/public\.//g' -e '/^SET /d' -e '/^SELECT /d' > "$OUTPUT_DIR/$POSTGRES_ROWS_FILE" +echo "Dumping Postgres schema..." -echo "Cleaning up temporary Postgres database..." 
-dropdb $POSTGRES_DB_NAME +pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.postgres" +pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_COMMON_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/common/full_schema/$SCHEMA_NUMBER/full.sql.postgres" +pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.postgres" +pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_MAIN_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/main/full_schema/$SCHEMA_NUMBER/full.sql.postgres" +pg_dump --format=plain --schema-only --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema > "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.postgres" +pg_dump --format=plain --data-only --inserts --no-tablespaces --no-acl --no-owner "$POSTGRES_STATE_DB_NAME" | cleanup_pg_schema >> "$OUTPUT_DIR/state/full_schema/$SCHEMA_NUMBER/full.sql.postgres" echo "Done! Files dumped to: $OUTPUT_DIR" diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index d08517a95382..2c377533c0fd 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -29,7 +29,7 @@ def get_method_signature_hook( self, fullname: str ) -> Optional[Callable[[MethodSigContext], CallableType]]: if fullname.startswith( - "synapse.util.caches.descriptors._CachedFunction.__call__" + "synapse.util.caches.descriptors.CachedFunction.__call__" ) or fullname.startswith( "synapse.util.caches.descriptors._LruCachedFunction.__call__" ): @@ -38,7 +38,7 @@ def get_method_signature_hook( def cached_function_method_signature(ctx: MethodSigContext) -> CallableType: - """Fixes the `_CachedFunction.__call__` signature to be correct. + """Fixes the `CachedFunction.__call__` signature to be correct. It already has *almost* the correct signature, except: diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 6603bc593b67..c82c58c54b05 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -427,11 +427,12 @@ def _publish(gh_token: str) -> None: @cli.command() -def upload() -> None: - _upload() +@click.option("--gh-token", envvar=["GH_TOKEN", "GITHUB_TOKEN"], required=False) +def upload(gh_token: Optional[str]) -> None: + _upload(gh_token) -def _upload() -> None: +def _upload(gh_token: Optional[str]) -> None: """Upload release to pypi.""" current_version = get_package_version() @@ -444,18 +445,40 @@ def _upload() -> None: click.echo("Tag {tag_name} (tag.commit) is not currently checked out!") click.get_current_context().abort() - pypi_asset_names = [ - f"matrix_synapse-{current_version}-py3-none-any.whl", - f"matrix-synapse-{current_version}.tar.gz", - ] + # Query all the assets corresponding to this release. + gh = Github(gh_token) + gh_repo = gh.get_repo("matrix-org/synapse") + gh_release = gh_repo.get_release(tag_name) + + all_assets = set(gh_release.get_assets()) + + # Only accept the wheels and sdist. + # Notably: we don't care about debs.tar.xz. + asset_names_and_urls = sorted( + (asset.name, asset.browser_download_url) + for asset in all_assets + if asset.name.endswith((".whl", ".tar.gz")) + ) + + # Print out what we've determined. 
+ print("Found relevant assets:") + for asset_name, _ in asset_names_and_urls: + print(f" - {asset_name}") + + ignored_asset_names = sorted( + {asset.name for asset in all_assets} + - {asset_name for asset_name, _ in asset_names_and_urls} + ) + print("\nIgnoring irrelevant assets:") + for asset_name in ignored_asset_names: + print(f" - {asset_name}") with TemporaryDirectory(prefix=f"synapse_upload_{tag_name}_") as tmpdir: - for name in pypi_asset_names: + for name, asset_download_url in asset_names_and_urls: filename = path.join(tmpdir, name) - url = f"https://github.com/matrix-org/synapse/releases/download/{tag_name}/{name}" click.echo(f"Downloading {name} into {filename}") - urllib.request.urlretrieve(url, filename=filename) + urllib.request.urlretrieve(asset_download_url, filename=filename) if click.confirm("Upload to PyPI?", default=True): subprocess.run("twine upload *", shell=True, cwd=tmpdir) @@ -672,7 +695,7 @@ def full(gh_token: str) -> None: _publish(gh_token) click.echo("\n*** upload ***") - _upload() + _upload(gh_token) click.echo("\n*** merge back ***") _merge_back() diff --git a/stubs/synapse/synapse_rust.pyi b/stubs/synapse/synapse_rust/__init__.pyi similarity index 100% rename from stubs/synapse/synapse_rust.pyi rename to stubs/synapse/synapse_rust/__init__.pyi diff --git a/stubs/synapse/synapse_rust/push.pyi b/stubs/synapse/synapse_rust/push.pyi new file mode 100644 index 000000000000..93c4e69d424f --- /dev/null +++ b/stubs/synapse/synapse_rust/push.pyi @@ -0,0 +1,37 @@ +from typing import Any, Collection, Dict, Mapping, Sequence, Tuple, Union + +from synapse.types import JsonDict + +class PushRule: + @property + def rule_id(self) -> str: ... + @property + def priority_class(self) -> int: ... + @property + def conditions(self) -> Sequence[Mapping[str, str]]: ... + @property + def actions(self) -> Sequence[Union[Mapping[str, Any], str]]: ... + @property + def default(self) -> bool: ... + @property + def default_enabled(self) -> bool: ... + @staticmethod + def from_db( + rule_id: str, priority_class: int, conditions: str, actions: str + ) -> "PushRule": ... + +class PushRules: + def __init__(self, rules: Collection[PushRule]): ... + def rules(self) -> Collection[PushRule]: ... + +class FilteredPushRules: + def __init__( + self, + push_rules: PushRules, + enabled_map: Dict[str, bool], + msc3786_enabled: bool, + msc3772_enabled: bool, + ): ... + def rules(self) -> Collection[Tuple[PushRule, bool]]: ... + +def get_base_rule_ids() -> Collection[str]: ... 
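Note for reviewers: the following is a minimal usage sketch of the new `synapse.synapse_rust.push` module, written against the stub above and the Rust `#[pymethods]` definitions earlier in this diff. It is illustrative only and not part of the changeset; the rule ID `global/override/.example.rule` is invented for the example.

import json

from synapse.synapse_rust.push import (
    FilteredPushRules,
    PushRule,
    PushRules,
    get_base_rule_ids,
)

# The base (default) rule IDs are exposed so Python code can tell user-defined
# rules apart from the built-in ones.
assert "global/override/.m.rule.master" in get_base_rule_ids()

# Custom rules are stored in the database as JSON strings, which is what
# PushRule.from_db expects for the conditions and actions columns.
custom_rule = PushRule.from_db(
    "global/override/.example.rule",  # hypothetical rule ID
    5,  # priority class 5 = "override"
    json.dumps([{"kind": "event_match", "key": "content.body", "pattern": "coffee"}]),
    json.dumps(["notify", {"set_tweak": "highlight", "value": False}]),
)

# PushRules interleaves the custom rules with the base rules in evaluation order.
rules = PushRules([custom_rule])

# FilteredPushRules layers the per-user enabled map and the experimental feature
# flags (msc3786_enabled, msc3772_enabled) on top, yielding (rule, enabled) pairs.
filtered = FilteredPushRules(rules, {}, False, False)
for rule, enabled in filtered.rules():
    print(rule.rule_id, enabled)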
diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 30983c47fbb7..450ba462ba11 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -111,6 +111,7 @@ "e2e_fallback_keys_json": ["used"], "access_tokens": ["used"], "device_lists_changes_in_room": ["converted_to_destinations"], + "pushers": ["enabled"], } diff --git a/synapse/_scripts/update_synapse_database.py b/synapse/_scripts/update_synapse_database.py index b4aeae6dd5b8..fb1fb83f50d9 100755 --- a/synapse/_scripts/update_synapse_database.py +++ b/synapse/_scripts/update_synapse_database.py @@ -48,10 +48,13 @@ def __init__(self, config: HomeServerConfig): def run_background_updates(hs: HomeServer) -> None: - store = hs.get_datastores().main + main = hs.get_datastores().main + state = hs.get_datastores().state async def run_background_updates() -> None: - await store.db_pool.updates.run_background_updates(sleep=False) + await main.db_pool.updates.run_background_updates(sleep=False) + if state: + await state.db_pool.updates.run_background_updates(sleep=False) # Stop the reactor to exit the script once every background update is run. reactor.stop() @@ -97,8 +100,11 @@ def main() -> None: # Load, process and sanity-check the config. hs_config = yaml.safe_load(args.database_config) - if "database" not in hs_config: - sys.stderr.write("The configuration file must have a 'database' section.\n") + if "database" not in hs_config and "databases" not in hs_config: + sys.stderr.write( + "The configuration file must have a 'database' or 'databases' section. " + "See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#database" + ) sys.exit(4) config = HomeServerConfig() diff --git a/synapse/api/constants.py b/synapse/api/constants.py index c178ddf070b4..3882f5c9b251 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -31,6 +31,9 @@ # the maximum length for a user id is 255 characters MAX_USERID_LENGTH = 255 +# Constant value used for the pseudo-thread which is the main timeline. +MAIN_TIMELINE: Final = "main" + class Membership: diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 0abd31875219..a370f39ba5d3 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -97,3 +97,13 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3852: Expose last seen user agent field on /_matrix/client/v3/devices. 
self.msc3852_enabled: bool = experimental.get("msc3852_enabled", False) + + # MSC3881: Remotely toggle push notifications for another client + self.msc3881_enabled: bool = experimental.get("msc3881_enabled", False) + + # MSC3882: Allow an existing session to sign in a new session + self.msc3882_enabled: bool = experimental.get("msc3882_enabled", False) + self.msc3882_ui_auth: bool = experimental.get("msc3882_ui_auth", True) + self.msc3882_token_timeout = self.parse_duration( + experimental.get("msc3882_token_timeout", "5m") + ) diff --git a/synapse/config/server.py b/synapse/config/server.py index c91df636d9a7..f2353ce5fb0a 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -206,6 +206,7 @@ class HttpListenerConfig: resources: List[HttpResourceConfig] = attr.Factory(list) additional_resources: Dict[str, dict] = attr.Factory(dict) tag: Optional[str] = None + request_id_header: Optional[str] = None @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -520,9 +521,11 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: ): raise ConfigError("allowed_avatar_mimetypes must be a list") - self.listeners = [ - parse_listener_def(i, x) for i, x in enumerate(config.get("listeners", [])) - ] + listeners = config.get("listeners", []) + if not isinstance(listeners, list): + raise ConfigError("Expected a list", ("listeners",)) + + self.listeners = [parse_listener_def(i, x) for i, x in enumerate(listeners)] # no_tls is not really supported any more, but let's grandfather it in # here. @@ -889,6 +892,9 @@ def read_gc_thresholds( def parse_listener_def(num: int, listener: Any) -> ListenerConfig: """parse a listener config from the config file""" + if not isinstance(listener, dict): + raise ConfigError("Expected a dictionary", ("listeners", str(num))) + listener_type = listener["type"] # Raise a helpful error if direct TCP replication is still configured. if listener_type == "replication": @@ -928,6 +934,7 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: resources=resources, additional_resources=listener.get("additional_resources", {}), tag=listener.get("tag"), + request_id_header=listener.get("request_id_header"), ) return ListenerConfig(port, bind_addresses, listener_type, tls, http_config) diff --git a/synapse/events/builder.py b/synapse/events/builder.py index 746bd3978d96..e2ee10dd3ddc 100644 --- a/synapse/events/builder.py +++ b/synapse/events/builder.py @@ -167,7 +167,6 @@ async def build( "content": self.content, "unsigned": self.unsigned, "depth": depth, - "prev_state": [], } if self.is_state(): diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 4a4289ee7cdd..464672a3da81 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -906,9 +906,6 @@ async def send_request(destination: str) -> Tuple[str, EventBase, RoomVersion]: # The protoevent received over the JSON wire may not have all # the required fields. 
Lets just gloss over that because # there's some we never care about - if "prev_state" not in pdu_dict: - pdu_dict["prev_state"] = [] - ev = builder.create_local_event_from_event_dict( self._clock, self.hostname, diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 0327fc57a4ec..eacd631ee07d 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -63,7 +63,6 @@ from synapse.http.site import SynapseRequest from synapse.logging.context import defer_to_thread from synapse.metrics.background_process_metrics import run_as_background_process -from synapse.storage.roommember import ProfileInfo from synapse.types import JsonDict, Requester, UserID from synapse.util import stringutils as stringutils from synapse.util.async_helpers import delay_cancellation, maybe_awaitable @@ -1687,41 +1686,10 @@ async def complete_sso_login( respond_with_html(request, 403, self._sso_account_deactivated_template) return - profile = await self.store.get_profileinfo( + user_profile_data = await self.store.get_profileinfo( UserID.from_string(registered_user_id).localpart ) - self._complete_sso_login( - registered_user_id, - auth_provider_id, - request, - client_redirect_url, - extra_attributes, - new_user=new_user, - user_profile_data=profile, - auth_provider_session_id=auth_provider_session_id, - ) - - def _complete_sso_login( - self, - registered_user_id: str, - auth_provider_id: str, - request: Request, - client_redirect_url: str, - extra_attributes: Optional[JsonDict] = None, - new_user: bool = False, - user_profile_data: Optional[ProfileInfo] = None, - auth_provider_session_id: Optional[str] = None, - ) -> None: - """ - The synchronous portion of complete_sso_login. - - This exists purely for backwards compatibility of synapse.module_api.ModuleApi. - """ - - if user_profile_data is None: - user_profile_data = ProfileInfo(None, None) - # Store any extra attributes which will be passed in the login response. # Note that this is per-user so it may overwrite a previous value, this # is considered OK since the newest SSO attributes should be most valid. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 8eed63ccf3ac..09a2492afc9e 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -188,18 +188,21 @@ async def query_devices( ) invalid_cached_users = cached_users - valid_cached_users if invalid_cached_users: - # Fix up results. If we get here, there is either a bug in device - # list tracking, or we hit the race mentioned above. + # Fix up results. If we get here, it means there was either a bug in + # device list tracking, or we hit the race mentioned above. + # TODO: In practice, this path is hit fairly often in existing + # deployments when clients query the keys of departed remote + # users. A background update to mark the appropriate device + # lists as unsubscribed is needed. + # https://github.com/matrix-org/synapse/issues/13651 + # Note that this currently introduces a failure mode when clients + # are trying to decrypt old messages from a remote user whose + # homeserver is no longer available. We may want to consider falling + # back to the cached data when we fail to retrieve a device list + # over federation for such remote users. user_ids_not_in_cache.update(invalid_cached_users) for invalid_user_id in invalid_cached_users: remote_results.pop(invalid_user_id) - # This log message may be removed if it turns out it's almost - # entirely triggered by races. 
- logger.error( - "Devices for %s were cached, but the server no longer shares " - "any rooms with them. The cached device lists are stale.", - invalid_cached_users, - ) for user_id, devices in remote_results.items(): user_devices = results.setdefault(user_id, {}) diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 9e065e1116b5..efcdb8405783 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -866,6 +866,11 @@ async def _process_pulled_event( event.room_id, event_id, str(err) ) return + except Exception as exc: + await self._store.record_event_failed_pull_attempt( + event.room_id, event_id, str(exc) + ) + raise exc try: try: @@ -908,6 +913,11 @@ async def _process_pulled_event( logger.warning("Pulled event %s failed history check.", event_id) else: raise + except Exception as exc: + await self._store.record_event_failed_pull_attempt( + event.room_id, event_id, str(exc) + ) + raise exc @trace async def _compute_event_context_with_maybe_missing_prevs( diff --git a/synapse/handlers/push_rules.py b/synapse/handlers/push_rules.py index 2599160bcc00..1219672a59dc 100644 --- a/synapse/handlers/push_rules.py +++ b/synapse/handlers/push_rules.py @@ -16,14 +16,17 @@ import attr from synapse.api.errors import SynapseError, UnrecognizedRequestError -from synapse.push.baserules import BASE_RULE_IDS from synapse.storage.push_rule import RuleNotFoundException +from synapse.synapse_rust.push import get_base_rule_ids from synapse.types import JsonDict if TYPE_CHECKING: from synapse.server import HomeServer +BASE_RULE_IDS = get_base_rule_ids() + + @attr.s(slots=True, frozen=True, auto_attribs=True) class RuleSpec: scope: str diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 20ec22105a34..cfcadb34db85 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -997,7 +997,7 @@ async def _register_email_threepid( assert user_tuple token_id = user_tuple.token_id - await self.pusher_pool.add_pusher( + await self.pusher_pool.add_or_update_pusher( user_id=user_id, access_token=token_id, kind="email", @@ -1005,7 +1005,7 @@ async def _register_email_threepid( app_display_name="Email Notifications", device_display_name=threepid["address"], pushkey=threepid["address"], - lang=None, # We don't know a user's language here + lang=None, data={}, ) diff --git a/synapse/handlers/sso.py b/synapse/handlers/sso.py index 1e171f3f7115..6bc1cbd78790 100644 --- a/synapse/handlers/sso.py +++ b/synapse/handlers/sso.py @@ -128,6 +128,9 @@ async def handle_redirect_request( @attr.s(auto_attribs=True) class UserAttributes: + # NB: This struct is documented in docs/sso_mapping_providers.md so that users can + # populate it with data from their own mapping providers. + # the localpart of the mxid that the mapper has assigned to the user. # if `None`, the mapper has not picked a userid, and the user should be prompted to # enter one. 
diff --git a/synapse/http/site.py b/synapse/http/site.py index 1155f3f61032..55a6afce3552 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -72,10 +72,12 @@ def __init__( site: "SynapseSite", *args: Any, max_request_body_size: int = 1024, + request_id_header: Optional[str] = None, **kw: Any, ): super().__init__(channel, *args, **kw) self._max_request_body_size = max_request_body_size + self.request_id_header = request_id_header self.synapse_site = site self.reactor = site.reactor self._channel = channel # this is used by the tests @@ -172,7 +174,14 @@ def set_opentracing_span(self, span: "opentracing.Span") -> None: self._opentracing_span = span def get_request_id(self) -> str: - return "%s-%i" % (self.get_method(), self.request_seq) + request_id_value = None + if self.request_id_header: + request_id_value = self.getHeader(self.request_id_header) + + if request_id_value is None: + request_id_value = str(self.request_seq) + + return "%s-%s" % (self.get_method(), request_id_value) def get_redacted_uri(self) -> str: """Gets the redacted URI associated with the request (or placeholder if the URI @@ -611,12 +620,15 @@ def __init__( proxied = config.http_options.x_forwarded request_class = XForwardedForRequest if proxied else SynapseRequest + request_id_header = config.http_options.request_id_header + def request_factory(channel: HTTPChannel, queued: bool) -> Request: return request_class( channel, self, max_request_body_size=max_request_body_size, queued=queued, + request_id_header=request_id_header, ) self.requestFactory = request_factory # type: ignore diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 87ba154cb737..59755bff6d01 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -125,7 +125,7 @@ ) from synapse.util import Clock from synapse.util.async_helpers import maybe_awaitable -from synapse.util.caches.descriptors import cached +from synapse.util.caches.descriptors import CachedFunction, cached from synapse.util.frozenutils import freeze if TYPE_CHECKING: @@ -836,29 +836,35 @@ def run_db_interaction( self._store.db_pool.runInteraction(desc, func, *args, **kwargs) # type: ignore[arg-type] ) - def complete_sso_login( - self, registered_user_id: str, request: SynapseRequest, client_redirect_url: str - ) -> None: - """Complete a SSO login by redirecting the user to a page to confirm whether they - want their access token sent to `client_redirect_url`, or redirect them to that - URL with a token directly if the URL matches with one of the whitelisted clients. + def register_cached_function(self, cached_func: CachedFunction) -> None: + """Register a cached function that should be invalidated across workers. + Invalidation local to a worker can be done directly using `cached_func.invalidate`, + however invalidation that needs to go to other workers needs to call `invalidate_cache` + on the module API instead. - This is deprecated in favor of complete_sso_login_async. + Args: + cached_function: The cached function that will be registered to receive invalidation + locally and from other workers. + """ + self._store.register_external_cached_function( + f"{cached_func.__module__}.{cached_func.__name__}", cached_func + ) - Added in Synapse v1.11.1. + async def invalidate_cache( + self, cached_func: CachedFunction, keys: Tuple[Any, ...] + ) -> None: + """Invalidate a cache entry of a cached function across workers. The cached function + needs to be registered on all workers first with `register_cached_function`. 
Args: - registered_user_id: The MXID that has been registered as a previous step of - of this SSO login. - request: The request to respond to. - client_redirect_url: The URL to which to offer to redirect the user (or to - redirect them directly if whitelisted). - """ - self._auth_handler._complete_sso_login( - registered_user_id, - "", - request, - client_redirect_url, + cached_function: The cached function that needs an invalidation + keys: keys of the entry to invalidate, usually matching the arguments of the + cached function. + """ + cached_func.invalidate(keys) + await self._store.send_invalidation_to_replication( + f"{cached_func.__module__}.{cached_func.__name__}", + keys, ) async def complete_sso_login_async( diff --git a/synapse/push/__init__.py b/synapse/push/__init__.py index 57c4d70466b6..a0c760239db8 100644 --- a/synapse/push/__init__.py +++ b/synapse/push/__init__.py @@ -116,6 +116,8 @@ class PusherConfig: last_stream_ordering: int last_success: Optional[int] failing_since: Optional[int] + enabled: bool + device_id: Optional[str] def as_dict(self) -> Dict[str, Any]: """Information that can be retrieved about a pusher after creation.""" @@ -128,6 +130,8 @@ def as_dict(self) -> Dict[str, Any]: "lang": self.lang, "profile_tag": self.profile_tag, "pushkey": self.pushkey, + "enabled": self.enabled, + "device_id": self.device_id, } diff --git a/synapse/push/baserules.py b/synapse/push/baserules.py deleted file mode 100644 index 440205e80c05..000000000000 --- a/synapse/push/baserules.py +++ /dev/null @@ -1,583 +0,0 @@ -# Copyright 2015, 2016 OpenMarket Ltd -# Copyright 2017 New Vector Ltd -# Copyright 2019 The Matrix.org Foundation C.I.C. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Push rules is the system used to determine which events trigger a push (and a -bump in notification counts). - -This consists of a list of "push rules" for each user, where a push rule is a -pair of "conditions" and "actions". When a user receives an event Synapse -iterates over the list of push rules until it finds one where all the conditions -match the event, at which point "actions" describe the outcome (e.g. notify, -highlight, etc). - -Push rules are split up into 5 different "kinds" (aka "priority classes"), which -are run in order: - 1. Override — highest priority rules, e.g. always ignore notices - 2. Content — content specific rules, e.g. @ notifications - 3. Room — per room rules, e.g. enable/disable notifications for all messages - in a room - 4. Sender — per sender rules, e.g. never notify for messages from a given - user - 5. Underride — the lowest priority "default" rules, e.g. notify for every - message. - -The set of "base rules" are the list of rules that every user has by default. A -user can modify their copy of the push rules in one of three ways: - - 1. Adding a new push rule of a certain kind - 2. Changing the actions of a base rule - 3. Enabling/disabling a base rule. 
- -The base rules are split into whether they come before or after a particular -kind, so the order of push rule evaluation would be: base rules for before -"override" kind, user defined "override" rules, base rules after "override" -kind, etc, etc. -""" - -import itertools -import logging -from typing import Dict, Iterator, List, Mapping, Sequence, Tuple, Union - -import attr - -from synapse.config.experimental import ExperimentalConfig -from synapse.push.rulekinds import PRIORITY_CLASS_MAP - -logger = logging.getLogger(__name__) - - -@attr.s(auto_attribs=True, slots=True, frozen=True) -class PushRule: - """A push rule - - Attributes: - rule_id: a unique ID for this rule - priority_class: what "kind" of push rule this is (see - `PRIORITY_CLASS_MAP` for mapping between int and kind) - conditions: the sequence of conditions that all need to match - actions: the actions to apply if all conditions are met - default: is this a base rule? - default_enabled: is this enabled by default? - """ - - rule_id: str - priority_class: int - conditions: Sequence[Mapping[str, str]] - actions: Sequence[Union[str, Mapping]] - default: bool = False - default_enabled: bool = True - - -@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False) -class PushRules: - """A collection of push rules for an account. - - Can be iterated over, producing push rules in priority order. - """ - - # A mapping from rule ID to push rule that overrides a base rule. These will - # be returned instead of the base rule. - overriden_base_rules: Dict[str, PushRule] = attr.Factory(dict) - - # The following stores the custom push rules at each priority class. - # - # We keep these separate (rather than combining into one big list) to avoid - # copying the base rules around all the time. - override: List[PushRule] = attr.Factory(list) - content: List[PushRule] = attr.Factory(list) - room: List[PushRule] = attr.Factory(list) - sender: List[PushRule] = attr.Factory(list) - underride: List[PushRule] = attr.Factory(list) - - def __iter__(self) -> Iterator[PushRule]: - # When iterating over the push rules we need to return the base rules - # interspersed at the correct spots. - for rule in itertools.chain( - BASE_PREPEND_OVERRIDE_RULES, - self.override, - BASE_APPEND_OVERRIDE_RULES, - self.content, - BASE_APPEND_CONTENT_RULES, - self.room, - self.sender, - self.underride, - BASE_APPEND_UNDERRIDE_RULES, - ): - # Check if a base rule has been overriden by a custom rule. If so - # return that instead. - override_rule = self.overriden_base_rules.get(rule.rule_id) - if override_rule: - yield override_rule - else: - yield rule - - def __len__(self) -> int: - # The length is mostly used by caches to get a sense of "size" / amount - # of memory this object is using, so we only count the number of custom - # rules. - return ( - len(self.overriden_base_rules) - + len(self.override) - + len(self.content) - + len(self.room) - + len(self.sender) - + len(self.underride) - ) - - -@attr.s(auto_attribs=True, slots=True, frozen=True, weakref_slot=False) -class FilteredPushRules: - """A wrapper around `PushRules` that filters out disabled experimental push - rules, and includes the "enabled" state for each rule when iterated over. 
- """ - - push_rules: PushRules - enabled_map: Dict[str, bool] - experimental_config: ExperimentalConfig - - def __iter__(self) -> Iterator[Tuple[PushRule, bool]]: - for rule in self.push_rules: - if not _is_experimental_rule_enabled( - rule.rule_id, self.experimental_config - ): - continue - - enabled = self.enabled_map.get(rule.rule_id, rule.default_enabled) - - yield rule, enabled - - def __len__(self) -> int: - return len(self.push_rules) - - -DEFAULT_EMPTY_PUSH_RULES = PushRules() - - -def compile_push_rules(rawrules: List[PushRule]) -> PushRules: - """Given a set of custom push rules return a `PushRules` instance (which - includes the base rules). - """ - - if not rawrules: - # Fast path to avoid allocating empty lists when there are no custom - # rules for the user. - return DEFAULT_EMPTY_PUSH_RULES - - rules = PushRules() - - for rule in rawrules: - # We need to decide which bucket each custom push rule goes into. - - # If it has the same ID as a base rule then it overrides that... - overriden_base_rule = BASE_RULES_BY_ID.get(rule.rule_id) - if overriden_base_rule: - rules.overriden_base_rules[rule.rule_id] = attr.evolve( - overriden_base_rule, actions=rule.actions - ) - continue - - # ... otherwise it gets added to the appropriate priority class bucket - collection: List[PushRule] - if rule.priority_class == 5: - collection = rules.override - elif rule.priority_class == 4: - collection = rules.content - elif rule.priority_class == 3: - collection = rules.room - elif rule.priority_class == 2: - collection = rules.sender - elif rule.priority_class == 1: - collection = rules.underride - elif rule.priority_class <= 0: - logger.info( - "Got rule with priority class less than zero, but doesn't override a base rule: %s", - rule, - ) - continue - else: - # We log and continue here so as not to break event sending - logger.error("Unknown priority class: %", rule.priority_class) - continue - - collection.append(rule) - - return rules - - -def _is_experimental_rule_enabled( - rule_id: str, experimental_config: ExperimentalConfig -) -> bool: - """Used by `FilteredPushRules` to filter out experimental rules when they - have not been enabled. - """ - if ( - rule_id == "global/override/.org.matrix.msc3786.rule.room.server_acl" - and not experimental_config.msc3786_enabled - ): - return False - if ( - rule_id == "global/underride/.org.matrix.msc3772.thread_reply" - and not experimental_config.msc3772_enabled - ): - return False - return True - - -BASE_APPEND_CONTENT_RULES = [ - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["content"], - rule_id="global/content/.m.rule.contains_user_name", - conditions=[ - { - "kind": "event_match", - "key": "content.body", - # Match the localpart of the requester's MXID. - "pattern_type": "user_localpart", - } - ], - actions=[ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight"}, - ], - ) -] - - -BASE_PREPEND_OVERRIDE_RULES = [ - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.master", - default_enabled=False, - conditions=[], - actions=["dont_notify"], - ) -] - - -BASE_APPEND_OVERRIDE_RULES = [ - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.suppress_notices", - conditions=[ - { - "kind": "event_match", - "key": "content.msgtype", - "pattern": "m.notice", - "_cache_key": "_suppress_notices", - } - ], - actions=["dont_notify"], - ), - # NB. 
.m.rule.invite_for_me must be higher prio than .m.rule.member_event - # otherwise invites will be matched by .m.rule.member_event - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.invite_for_me", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.member", - "_cache_key": "_member", - }, - { - "kind": "event_match", - "key": "content.membership", - "pattern": "invite", - "_cache_key": "_invite_member", - }, - # Match the requester's MXID. - {"kind": "event_match", "key": "state_key", "pattern_type": "user_id"}, - ], - actions=[ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight", "value": False}, - ], - ), - # Will we sometimes want to know about people joining and leaving? - # Perhaps: if so, this could be expanded upon. Seems the most usual case - # is that we don't though. We add this override rule so that even if - # the room rule is set to notify, we don't get notifications about - # join/leave/avatar/displayname events. - # See also: https://matrix.org/jira/browse/SYN-607 - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.member_event", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.member", - "_cache_key": "_member", - } - ], - actions=["dont_notify"], - ), - # This was changed from underride to override so it's closer in priority - # to the content rules where the user name highlight rule lives. This - # way a room rule is lower priority than both but a custom override rule - # is higher priority than both. - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.contains_display_name", - conditions=[{"kind": "contains_display_name"}], - actions=[ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight"}, - ], - ), - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.roomnotif", - conditions=[ - { - "kind": "event_match", - "key": "content.body", - "pattern": "@room", - "_cache_key": "_roomnotif_content", - }, - { - "kind": "sender_notification_permission", - "key": "room", - "_cache_key": "_roomnotif_pl", - }, - ], - actions=["notify", {"set_tweak": "highlight", "value": True}], - ), - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.tombstone", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.tombstone", - "_cache_key": "_tombstone", - }, - { - "kind": "event_match", - "key": "state_key", - "pattern": "", - "_cache_key": "_tombstone_statekey", - }, - ], - actions=["notify", {"set_tweak": "highlight", "value": True}], - ), - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.m.rule.reaction", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.reaction", - "_cache_key": "_reaction", - } - ], - actions=["dont_notify"], - ), - # XXX: This is an experimental rule that is only enabled if msc3786_enabled - # is enabled, if it is not the rule gets filtered out in _load_rules() in - # PushRulesWorkerStore - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["override"], - rule_id="global/override/.org.matrix.msc3786.rule.room.server_acl", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.server_acl", - "_cache_key": 
"_room_server_acl", - }, - { - "kind": "event_match", - "key": "state_key", - "pattern": "", - "_cache_key": "_room_server_acl_state_key", - }, - ], - actions=[], - ), -] - - -BASE_APPEND_UNDERRIDE_RULES = [ - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["underride"], - rule_id="global/underride/.m.rule.call", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.call.invite", - "_cache_key": "_call", - } - ], - actions=[ - "notify", - {"set_tweak": "sound", "value": "ring"}, - {"set_tweak": "highlight", "value": False}, - ], - ), - # XXX: once m.direct is standardised everywhere, we should use it to detect - # a DM from the user's perspective rather than this heuristic. - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["underride"], - rule_id="global/underride/.m.rule.room_one_to_one", - conditions=[ - {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.message", - "_cache_key": "_message", - }, - ], - actions=[ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight", "value": False}, - ], - ), - # XXX: this is going to fire for events which aren't m.room.messages - # but are encrypted (e.g. m.call.*)... - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["underride"], - rule_id="global/underride/.m.rule.encrypted_room_one_to_one", - conditions=[ - {"kind": "room_member_count", "is": "2", "_cache_key": "member_count"}, - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.encrypted", - "_cache_key": "_encrypted", - }, - ], - actions=[ - "notify", - {"set_tweak": "sound", "value": "default"}, - {"set_tweak": "highlight", "value": False}, - ], - ), - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["underride"], - rule_id="global/underride/.org.matrix.msc3772.thread_reply", - conditions=[ - { - "kind": "org.matrix.msc3772.relation_match", - "rel_type": "m.thread", - # Match the requester's MXID. - "sender_type": "user_id", - } - ], - actions=["notify", {"set_tweak": "highlight", "value": False}], - ), - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["underride"], - rule_id="global/underride/.m.rule.message", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.message", - "_cache_key": "_message", - } - ], - actions=["notify", {"set_tweak": "highlight", "value": False}], - ), - # XXX: this is going to fire for events which aren't m.room.messages - # but are encrypted (e.g. m.call.*)... 
- PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["underride"], - rule_id="global/underride/.m.rule.encrypted", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "m.room.encrypted", - "_cache_key": "_encrypted", - } - ], - actions=["notify", {"set_tweak": "highlight", "value": False}], - ), - PushRule( - default=True, - priority_class=PRIORITY_CLASS_MAP["underride"], - rule_id="global/underride/.im.vector.jitsi", - conditions=[ - { - "kind": "event_match", - "key": "type", - "pattern": "im.vector.modular.widgets", - "_cache_key": "_type_modular_widgets", - }, - { - "kind": "event_match", - "key": "content.type", - "pattern": "jitsi", - "_cache_key": "_content_type_jitsi", - }, - { - "kind": "event_match", - "key": "state_key", - "pattern": "*", - "_cache_key": "_is_state_event", - }, - ], - actions=["notify", {"set_tweak": "highlight", "value": False}], - ), -] - - -BASE_RULE_IDS = set() - -BASE_RULES_BY_ID: Dict[str, PushRule] = {} - -for r in BASE_APPEND_CONTENT_RULES: - BASE_RULE_IDS.add(r.rule_id) - BASE_RULES_BY_ID[r.rule_id] = r - -for r in BASE_PREPEND_OVERRIDE_RULES: - BASE_RULE_IDS.add(r.rule_id) - BASE_RULES_BY_ID[r.rule_id] = r - -for r in BASE_APPEND_OVERRIDE_RULES: - BASE_RULE_IDS.add(r.rule_id) - BASE_RULES_BY_ID[r.rule_id] = r - -for r in BASE_APPEND_UNDERRIDE_RULES: - BASE_RULE_IDS.add(r.rule_id) - BASE_RULES_BY_ID[r.rule_id] = r diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 3846fbc5f042..d2c306b1d4b9 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -30,18 +30,18 @@ from prometheus_client import Counter -from synapse.api.constants import EventTypes, Membership, RelationTypes +from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes from synapse.event_auth import auth_types_for_event, get_user_power_level from synapse.events import EventBase, relation_from_event from synapse.events.snapshot import EventContext from synapse.state import POWER_KEY from synapse.storage.databases.main.roommember import EventIdMembership from synapse.storage.state import StateFilter +from synapse.synapse_rust.push import FilteredPushRules, PushRule from synapse.util.caches import register_cache from synapse.util.metrics import measure_func from synapse.visibility import filter_event_for_clients_with_state -from .baserules import FilteredPushRules, PushRule from .push_rule_evaluator import PushRuleEvaluatorForEvent if TYPE_CHECKING: @@ -277,10 +277,11 @@ async def action_for_event_by_user( # If the event does not have a relation, then cannot have any mutual # relations or thread ID. relations = {} - thread_id = "main" + thread_id = MAIN_TIMELINE if relation: relations = await self._get_mutual_relations( - relation.parent_id, itertools.chain(*rules_by_user.values()) + relation.parent_id, + itertools.chain(*(r.rules() for r in rules_by_user.values())), ) if relation.rel_type == RelationTypes.THREAD: thread_id = relation.parent_id @@ -333,7 +334,7 @@ async def action_for_event_by_user( # current user, it'll be added to the dict later. 
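For context on the call-site change in `bulk_push_rule_evaluator.py` above (`rules` becoming `rules.rules()`): the deleted Python `FilteredPushRules` was directly iterable, while the Rust-backed replacement from `synapse.synapse_rust.push` exposes the (rule, enabled) pairs through an explicit `rules()` method. A minimal, self-contained sketch of what consumers now look like; `DemoRule` and `DemoFilteredRules` are illustrative stand-ins, not the real Synapse classes:

```python
from dataclasses import dataclass
from typing import Dict, List, Tuple


@dataclass
class DemoRule:
    rule_id: str
    actions: List[object]
    default_enabled: bool = True


class DemoFilteredRules:
    """Stand-in for synapse.synapse_rust.push.FilteredPushRules."""

    def __init__(self, rules: List[DemoRule], enabled_map: Dict[str, bool]):
        self._rules = rules
        self._enabled_map = enabled_map

    def rules(self) -> List[Tuple[DemoRule, bool]]:
        # Each entry pairs a rule with whether the user has it enabled,
        # falling back to the rule's default.
        return [
            (r, self._enabled_map.get(r.rule_id, r.default_enabled))
            for r in self._rules
        ]


filtered = DemoFilteredRules(
    [
        DemoRule("global/override/.m.rule.master", ["dont_notify"], default_enabled=False),
        DemoRule("global/underride/.m.rule.message", ["notify", {"set_tweak": "highlight", "value": False}]),
    ],
    enabled_map={},
)

# Call sites now iterate rules.rules() rather than the object itself:
for rule, enabled in filtered.rules():
    if not enabled:
        continue
    print(rule.rule_id, rule.actions)
```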
actions_by_user[uid] = [] - for rule, enabled in rules: + for rule, enabled in rules.rules(): if not enabled: continue diff --git a/synapse/push/clientformat.py b/synapse/push/clientformat.py index 73618d9234bf..ebc13beda1bc 100644 --- a/synapse/push/clientformat.py +++ b/synapse/push/clientformat.py @@ -16,10 +16,9 @@ from typing import Any, Dict, List, Optional from synapse.push.rulekinds import PRIORITY_CLASS_INVERSE_MAP, PRIORITY_CLASS_MAP +from synapse.synapse_rust.push import FilteredPushRules, PushRule from synapse.types import UserID -from .baserules import FilteredPushRules, PushRule - def format_push_rules_for_user( user: UserID, ruleslist: FilteredPushRules @@ -34,7 +33,7 @@ def format_push_rules_for_user( rules["global"] = _add_empty_priority_class_arrays(rules["global"]) - for r, enabled in ruleslist: + for r, enabled in ruleslist.rules(): template_name = _priority_class_to_template_name(r.priority_class) rulearray = rules["global"][template_name] diff --git a/synapse/push/pusherpool.py b/synapse/push/pusherpool.py index 1e0ef44fc786..e2648cbc93c9 100644 --- a/synapse/push/pusherpool.py +++ b/synapse/push/pusherpool.py @@ -94,7 +94,7 @@ def start(self) -> None: return run_as_background_process("start_pushers", self._start_pushers) - async def add_pusher( + async def add_or_update_pusher( self, user_id: str, access_token: Optional[int], @@ -106,6 +106,8 @@ async def add_pusher( lang: Optional[str], data: JsonDict, profile_tag: str = "", + enabled: bool = True, + device_id: Optional[str] = None, ) -> Optional[Pusher]: """Creates a new pusher and adds it to the pool @@ -147,9 +149,22 @@ async def add_pusher( last_stream_ordering=last_stream_ordering, last_success=None, failing_since=None, + enabled=enabled, + device_id=device_id, ) ) + # Before we actually persist the pusher, we check if the user already has one + # this app ID and pushkey. If so, we want to keep the access token and device ID + # in place, since this could be one device modifying (e.g. enabling/disabling) + # another device's pusher. + existing_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey( + user_id, app_id, pushkey + ) + if existing_config: + access_token = existing_config.access_token + device_id = existing_config.device_id + await self.store.add_pusher( user_id=user_id, access_token=access_token, @@ -163,8 +178,10 @@ async def add_pusher( data=data, last_stream_ordering=last_stream_ordering, profile_tag=profile_tag, + enabled=enabled, + device_id=device_id, ) - pusher = await self.start_pusher_by_id(app_id, pushkey, user_id) + pusher = await self.process_pusher_change_by_id(app_id, pushkey, user_id) return pusher @@ -276,10 +293,25 @@ async def on_new_receipts( except Exception: logger.exception("Exception in pusher on_new_receipts") - async def start_pusher_by_id( + async def _get_pusher_config_for_user_by_app_id_and_pushkey( + self, user_id: str, app_id: str, pushkey: str + ) -> Optional[PusherConfig]: + resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey) + + pusher_config = None + for r in resultlist: + if r.user_name == user_id: + pusher_config = r + + return pusher_config + + async def process_pusher_change_by_id( self, app_id: str, pushkey: str, user_id: str ) -> Optional[Pusher]: - """Look up the details for the given pusher, and start it + """Look up the details for the given pusher, and either start it if its + "enabled" flag is True, or try to stop it otherwise. + + If the pusher is new and its "enabled" flag is False, the stop is a noop. 
Returns: The pusher started, if any @@ -290,12 +322,13 @@ async def start_pusher_by_id( if not self._pusher_shard_config.should_handle(self._instance_name, user_id): return None - resultlist = await self.store.get_pushers_by_app_id_and_pushkey(app_id, pushkey) + pusher_config = await self._get_pusher_config_for_user_by_app_id_and_pushkey( + user_id, app_id, pushkey + ) - pusher_config = None - for r in resultlist: - if r.user_name == user_id: - pusher_config = r + if pusher_config and not pusher_config.enabled: + self.maybe_stop_pusher(app_id, pushkey, user_id) + return None pusher = None if pusher_config: @@ -305,7 +338,7 @@ async def start_pusher_by_id( async def _start_pushers(self) -> None: """Start all the pushers""" - pushers = await self.store.get_all_pushers() + pushers = await self.store.get_enabled_pushers() # Stagger starting up the pushers so we don't completely drown the # process on start up. @@ -363,6 +396,8 @@ async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]: synapse_pushers.labels(type(pusher).__name__, pusher.app_id).inc() + logger.info("Starting pusher %s / %s", pusher.user_id, appid_pushkey) + # Check if there *may* be push to process. We do this as this check is a # lot cheaper to do than actually fetching the exact rows we need to # push. @@ -382,16 +417,7 @@ async def _start_pusher(self, pusher_config: PusherConfig) -> Optional[Pusher]: return pusher async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None: - appid_pushkey = "%s:%s" % (app_id, pushkey) - - byuser = self.pushers.get(user_id, {}) - - if appid_pushkey in byuser: - logger.info("Stopping pusher %s / %s", user_id, appid_pushkey) - pusher = byuser.pop(appid_pushkey) - pusher.on_stop() - - synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec() + self.maybe_stop_pusher(app_id, pushkey, user_id) # We can only delete pushers on master. if self._remove_pusher_client: @@ -402,3 +428,22 @@ async def remove_pusher(self, app_id: str, pushkey: str, user_id: str) -> None: await self.store.delete_pusher_by_app_id_pushkey_user_id( app_id, pushkey, user_id ) + + def maybe_stop_pusher(self, app_id: str, pushkey: str, user_id: str) -> None: + """Stops a pusher with the given app ID and push key if one is running. + + Args: + app_id: the pusher's app ID. + pushkey: the pusher's push key. + user_id: the user the pusher belongs to. Only used for logging. + """ + appid_pushkey = "%s:%s" % (app_id, pushkey) + + byuser = self.pushers.get(user_id, {}) + + if appid_pushkey in byuser: + logger.info("Stopping pusher %s / %s", user_id, appid_pushkey) + pusher = byuser.pop(appid_pushkey) + pusher.on_stop() + + synapse_pushers.labels(type(pusher).__name__, pusher.app_id).dec() diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 93d53d68db0e..b2522f98cade 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -189,7 +189,9 @@ async def on_rdata( if row.deleted: self.stop_pusher(row.user_id, row.app_id, row.pushkey) else: - await self.start_pusher(row.user_id, row.app_id, row.pushkey) + await self.process_pusher_change( + row.user_id, row.app_id, row.pushkey + ) elif stream_name == EventsStream.NAME: # We shouldn't get multiple rows per token for events stream, so # we don't need to optimise this for multiple rows. 
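The renamed `process_pusher_change_by_id` above has three cases to handle: no stored config, a config whose `enabled` flag is false (stop any running pusher), and an enabled config (start one). A rough standalone sketch of that decision flow; `DemoPusherPool` and `DemoConfig` are simplified stand-ins for illustration only, and the real methods are async:

```python
from dataclasses import dataclass
from typing import Dict, Optional


@dataclass
class DemoConfig:
    user_name: str
    app_id: str
    pushkey: str
    enabled: bool


class DemoPusherPool:
    def __init__(self) -> None:
        # user_id -> {"app_id:pushkey": running pusher}
        self.pushers: Dict[str, Dict[str, str]] = {}
        self.configs: Dict[str, DemoConfig] = {}

    def _key(self, app_id: str, pushkey: str) -> str:
        return "%s:%s" % (app_id, pushkey)

    def maybe_stop_pusher(self, app_id: str, pushkey: str, user_id: str) -> None:
        # Mirrors the new helper: stop only if a pusher is actually running.
        self.pushers.get(user_id, {}).pop(self._key(app_id, pushkey), None)

    def process_pusher_change_by_id(self, app_id: str, pushkey: str, user_id: str) -> Optional[str]:
        config = self.configs.get(self._key(app_id, pushkey))
        if config and not config.enabled:
            # Disabled: make sure nothing is running, and start nothing.
            self.maybe_stop_pusher(app_id, pushkey, user_id)
            return None
        if config:
            pusher = f"pusher for {user_id}"
            self.pushers.setdefault(user_id, {})[self._key(app_id, pushkey)] = pusher
            return pusher
        return None


pool = DemoPusherPool()
pool.configs["com.example.app:key1"] = DemoConfig("@alice:example.org", "com.example.app", "key1", enabled=False)
assert pool.process_pusher_change_by_id("com.example.app", "key1", "@alice:example.org") is None

pool.configs["com.example.app:key1"].enabled = True
print(pool.process_pusher_change_by_id("com.example.app", "key1", "@alice:example.org"))
# pusher for @alice:example.org
```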
@@ -334,13 +336,15 @@ def stop_pusher(self, user_id: str, app_id: str, pushkey: str) -> None: logger.info("Stopping pusher %r / %r", user_id, key) pusher.on_stop() - async def start_pusher(self, user_id: str, app_id: str, pushkey: str) -> None: + async def process_pusher_change( + self, user_id: str, app_id: str, pushkey: str + ) -> None: if not self._notify_pushers: return key = "%s:%s" % (app_id, pushkey) logger.info("Starting pusher %r / %r", user_id, key) - await self._pusher_pool.start_pusher_by_id(app_id, pushkey, user_id) + await self._pusher_pool.process_pusher_change_by_id(app_id, pushkey, user_id) class FederationSenderHandler: diff --git a/synapse/rest/__init__.py b/synapse/rest/__init__.py index b71221511209..9a2ab99ede87 100644 --- a/synapse/rest/__init__.py +++ b/synapse/rest/__init__.py @@ -30,6 +30,7 @@ keys, knock, login as v1_login, + login_token_request, logout, mutual_rooms, notifications, @@ -130,3 +131,4 @@ def register_servlets(client_resource: HttpServer, hs: "HomeServer") -> None: # unstable mutual_rooms.register_servlets(hs, client_resource) + login_token_request.register_servlets(hs, client_resource) diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index bac754e1b1d5..885669f9c779 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -80,6 +80,7 @@ SearchUsersRestServlet, ShadowBanRestServlet, UserAdminServlet, + UserByExternalId, UserMembershipRestServlet, UserRegisterServlet, UserRestServletV2, @@ -275,6 +276,7 @@ def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ListDestinationsRestServlet(hs).register(http_server) RoomMessagesRestServlet(hs).register(http_server) RoomTimestampToEventRestServlet(hs).register(http_server) + UserByExternalId(hs).register(http_server) # Some servlets only get registered for the main process. 
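On the replication handler change above: a pushers-stream row that is not a deletion is now routed through `process_pusher_change` rather than unconditionally starting the pusher, so an enable/disable made via another worker takes effect locally. A toy, synchronous sketch of that dispatch (the types and print statements are placeholders, not the real async code):

```python
class DemoRow:
    def __init__(self, user_id: str, app_id: str, pushkey: str, deleted: bool):
        self.user_id, self.app_id, self.pushkey, self.deleted = user_id, app_id, pushkey, deleted


class DemoPool:
    def stop_pusher(self, user_id: str, app_id: str, pushkey: str) -> None:
        print("stopping", user_id, f"{app_id}:{pushkey}")

    def process_pusher_change(self, user_id: str, app_id: str, pushkey: str) -> None:
        # In Synapse this consults the stored "enabled" flag and either starts
        # or stops the pusher; here we just record that it was asked to decide.
        print("processing change for", user_id, f"{app_id}:{pushkey}")


def handle_pushers_row(pool: DemoPool, row: DemoRow) -> None:
    if row.deleted:
        pool.stop_pusher(row.user_id, row.app_id, row.pushkey)
    else:
        # Previously this branch unconditionally started the pusher.
        pool.process_pusher_change(row.user_id, row.app_id, row.pushkey)


handle_pushers_row(DemoPool(), DemoRow("@alice:example.org", "com.example.app", "key1", deleted=False))
```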
if hs.config.worker.worker_app is None: diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index 78ee9b6532b6..1274773d7eb0 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -375,7 +375,7 @@ async def on_PUT( and self.hs.config.email.email_notif_for_new_users and medium == "email" ): - await self.pusher_pool.add_pusher( + await self.pusher_pool.add_or_update_pusher( user_id=user_id, access_token=None, kind="email", @@ -383,7 +383,7 @@ async def on_PUT( app_display_name="Email Notifications", device_display_name=address, pushkey=address, - lang=None, # We don't know a user's language here + lang=None, data={}, ) @@ -1156,3 +1156,30 @@ async def on_GET( "rooms": by_room_data, }, } + + +class UserByExternalId(RestServlet): + """Find a user based on an external ID from an auth provider""" + + PATTERNS = admin_patterns( + "/auth_providers/(?P[^/]*)/users/(?P[^/]*)" + ) + + def __init__(self, hs: "HomeServer"): + self._auth = hs.get_auth() + self._store = hs.get_datastores().main + + async def on_GET( + self, + request: SynapseRequest, + provider: str, + external_id: str, + ) -> Tuple[int, JsonDict]: + await assert_requester_is_admin(self._auth, request) + + user_id = await self._store.get_user_by_external_id(provider, external_id) + + if user_id is None: + raise NotFoundError("User not found") + + return HTTPStatus.OK, {"user_id": user_id} diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index 2db2a04f95df..44f622bcce15 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -534,6 +534,11 @@ class AddThreepidMsisdnSubmitTokenServlet(RestServlet): "/add_threepid/msisdn/submit_token$", releases=(), unstable=True ) + class PostBody(RequestBodyModel): + client_secret: ClientSecretStr + sid: StrictStr + token: StrictStr + def __init__(self, hs: "HomeServer"): super().__init__() self.config = hs.config @@ -549,16 +554,14 @@ async def on_POST(self, request: Request) -> Tuple[int, JsonDict]: "instead.", ) - body = parse_json_object_from_request(request) - assert_params_in_dict(body, ["client_secret", "sid", "token"]) - assert_valid_client_secret(body["client_secret"]) + body = parse_and_validate_json_object_from_request(request, self.PostBody) # Proxy submit_token request to msisdn threepid delegate response = await self.identity_handler.proxy_msisdn_submit_token( self.config.registration.account_threepid_delegate_msisdn, - body["client_secret"], - body["sid"], - body["token"], + body.client_secret, + body.sid, + body.token, ) return 200, response @@ -581,6 +584,10 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: return 200, {"threepids": threepids} + # NOTE(dmr): I have chosen not to use Pydantic to parse this request's body, because + # the endpoint is deprecated. (If you really want to, you could do this by reusing + # ThreePidBindRestServelet.PostBody with an `alias_generator` to handle + # `threePidCreds` versus `three_pid_creds`. async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if not self.hs.config.registration.enable_3pid_changes: raise SynapseError( diff --git a/synapse/rest/client/login_token_request.py b/synapse/rest/client/login_token_request.py new file mode 100644 index 000000000000..ca5c54bf17a0 --- /dev/null +++ b/synapse/rest/client/login_token_request.py @@ -0,0 +1,94 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. 
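The new `UserByExternalId` admin servlet registered above maps an auth-provider external ID back to a Matrix user ID, returning `{"user_id": ...}` or a 404 if no mapping exists. A hypothetical usage sketch, assuming the usual `/_synapse/admin/v1` prefix for admin endpoints; the server URL, token, provider name and external ID below are placeholders:

```python
import requests

BASE = "https://synapse.example.org"
ADMIN_TOKEN = "syt_admin_token"  # placeholder admin access token

resp = requests.get(
    f"{BASE}/_synapse/admin/v1/auth_providers/oidc-keycloak/users/alice-external-id",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    timeout=10,
)

if resp.status_code == 200:
    print("Mapped Matrix user:", resp.json()["user_id"])
elif resp.status_code == 404:
    print("No user is associated with that external ID")
```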
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from typing import TYPE_CHECKING, Tuple + +from synapse.http.server import HttpServer +from synapse.http.servlet import RestServlet, parse_json_object_from_request +from synapse.http.site import SynapseRequest +from synapse.rest.client._base import client_patterns, interactive_auth_handler +from synapse.types import JsonDict + +if TYPE_CHECKING: + from synapse.server import HomeServer + +logger = logging.getLogger(__name__) + + +class LoginTokenRequestServlet(RestServlet): + """ + Get a token that can be used with `m.login.token` to log in a second device. + + Request: + + POST /login/token HTTP/1.1 + Content-Type: application/json + + {} + + Response: + + HTTP/1.1 200 OK + { + "login_token": "ABDEFGH", + "expires_in": 3600, + } + """ + + PATTERNS = client_patterns("/login/token$") + + def __init__(self, hs: "HomeServer"): + super().__init__() + self.auth = hs.get_auth() + self.store = hs.get_datastores().main + self.clock = hs.get_clock() + self.server_name = hs.config.server.server_name + self.macaroon_gen = hs.get_macaroon_generator() + self.auth_handler = hs.get_auth_handler() + self.token_timeout = hs.config.experimental.msc3882_token_timeout + self.ui_auth = hs.config.experimental.msc3882_ui_auth + + @interactive_auth_handler + async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: + requester = await self.auth.get_user_by_req(request) + body = parse_json_object_from_request(request) + + if self.ui_auth: + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body, + "issue a new access token for your account", + can_skip_ui_auth=False, # Don't allow skipping of UI auth + ) + + login_token = self.macaroon_gen.generate_short_term_login_token( + user_id=requester.user.to_string(), + auth_provider_id="org.matrix.msc3882.login_token_request", + duration_in_ms=self.token_timeout, + ) + + return ( + 200, + { + "login_token": login_token, + "expires_in": self.token_timeout // 1000, + }, + ) + + +def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: + if hs.config.experimental.msc3882_enabled: + LoginTokenRequestServlet(hs).register(http_server) diff --git a/synapse/rest/client/pusher.py b/synapse/rest/client/pusher.py index 9a1f10f4befe..975eef2144b6 100644 --- a/synapse/rest/client/pusher.py +++ b/synapse/rest/client/pusher.py @@ -42,6 +42,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.hs = hs self.auth = hs.get_auth() + self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) @@ -51,9 +52,16 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: user.to_string() ) - filtered_pushers = [p.as_dict() for p in pushers] + pusher_dicts = [p.as_dict() for p in pushers] - return 200, {"pushers": filtered_pushers} + for pusher in pusher_dicts: + if self._msc3881_enabled: + 
pusher["org.matrix.msc3881.enabled"] = pusher["enabled"] + pusher["org.matrix.msc3881.device_id"] = pusher["device_id"] + del pusher["enabled"] + del pusher["device_id"] + + return 200, {"pushers": pusher_dicts} class PushersSetRestServlet(RestServlet): @@ -65,6 +73,7 @@ def __init__(self, hs: "HomeServer"): self.auth = hs.get_auth() self.notifier = hs.get_notifier() self.pusher_pool = self.hs.get_pusherpool() + self._msc3881_enabled = self.hs.config.experimental.msc3881_enabled async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: requester = await self.auth.get_user_by_req(request) @@ -103,6 +112,10 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: if "append" in content: append = content["append"] + enabled = True + if self._msc3881_enabled and "org.matrix.msc3881.enabled" in content: + enabled = content["org.matrix.msc3881.enabled"] + if not append: await self.pusher_pool.remove_pushers_by_app_id_and_pushkey_not_user( app_id=content["app_id"], @@ -111,7 +124,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: ) try: - await self.pusher_pool.add_pusher( + await self.pusher_pool.add_or_update_pusher( user_id=user.to_string(), access_token=requester.access_token_id, kind=content["kind"], @@ -122,6 +135,8 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: lang=content["lang"], data=content["data"], profile_tag=content.get("profile_tag", ""), + enabled=enabled, + device_id=requester.device_id, ) except PusherConfigException as pce: raise SynapseError( diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 430e152cdf75..280d30648317 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -108,6 +108,10 @@ def on_GET(self, request: Request) -> Tuple[int, JsonDict]: "org.matrix.msc3773": self.config.experimental.msc3773_enabled, # Allows moderators to fetch redacted event content as described in MSC2815 "fi.mau.msc2815": self.config.experimental.msc2815_enabled, + # Adds support for login token requests as per MSC3882 + "org.matrix.msc3882": self.config.experimental.msc3882_enabled, + # Adds support for remotely enabling/disabling pushers, as per MSC3881 + "org.matrix.msc3881": self.config.experimental.msc3881_enabled, }, }, ) diff --git a/synapse/state/v2.py b/synapse/state/v2.py index af03851c7142..1b9d7d84576e 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -577,6 +577,21 @@ async def _iterative_auth_checks( if ev.rejected_reason is None: auth_events[key] = event_map[ev_id] + if event.rejected_reason is not None: + # Do not admit previously rejected events into state. + # TODO: This isn't spec compliant. Events that were previously rejected due + # to failing auth checks at their state, but pass auth checks during + # state resolution should be accepted. Synapse does not handle the + # change of rejection status well, so we preserve the previous + # rejection status for now. + # + # Note that events rejected for non-state reasons, such as having the + # wrong auth events, should remain rejected. + # + # https://spec.matrix.org/v1.2/rooms/v9/#rejected-events + # https://github.com/matrix-org/synapse/issues/13797 + continue + try: event_auth.check_state_dependent_auth_rules( event, diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index e30f9c76d461..303a5d529800 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -15,12 +15,13 @@ # limitations under the License. 
import logging from abc import ABCMeta -from typing import TYPE_CHECKING, Any, Collection, Iterable, Optional, Union +from typing import TYPE_CHECKING, Any, Collection, Dict, Iterable, Optional, Union from synapse.storage.database import make_in_list_sql_clause # noqa: F401; noqa: F401 from synapse.storage.database import DatabasePool, LoggingDatabaseConnection from synapse.types import get_domain_from_id from synapse.util import json_decoder +from synapse.util.caches.descriptors import CachedFunction if TYPE_CHECKING: from synapse.server import HomeServer @@ -47,6 +48,8 @@ def __init__( self.database_engine = database.engine self.db_pool = database + self.external_cached_functions: Dict[str, CachedFunction] = {} + def process_replication_rows( self, stream_name: str, @@ -95,7 +98,7 @@ def _invalidate_state_caches( def _attempt_to_invalidate_cache( self, cache_name: str, key: Optional[Collection[Any]] - ) -> None: + ) -> bool: """Attempts to invalidate the cache of the given name, ignoring if the cache doesn't exist. Mainly used for invalidating caches on workers, where they may not have the cache. @@ -113,9 +116,12 @@ def _attempt_to_invalidate_cache( try: cache = getattr(self, cache_name) except AttributeError: - # We probably haven't pulled in the cache in this worker, - # which is fine. - return + # Check if an externally defined module cache has been registered + cache = self.external_cached_functions.get(cache_name) + if not cache: + # We probably haven't pulled in the cache in this worker, + # which is fine. + return False if key is None: cache.invalidate_all() @@ -125,6 +131,13 @@ def _attempt_to_invalidate_cache( invalidate_method = getattr(cache, "invalidate_local", cache.invalidate) invalidate_method(tuple(key)) + return True + + def register_external_cached_function( + self, cache_name: str, func: CachedFunction + ) -> None: + self.external_cached_functions[cache_name] = func + def db_to_json(db_content: Union[memoryview, bytes, bytearray, str]) -> Any: """ diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index bf5e7ee7be7e..2056ecb2c331 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -285,7 +285,10 @@ async def run_background_updates(self, sleep: bool) -> None: back_to_back_failures = 0 try: - logger.info("Starting background schema updates") + logger.info( + "Starting background schema updates for database %s", + self._database_name, + ) while self.enabled: try: result = await self.do_next_background_update(sleep) diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 12e9a423826a..2c421151c1be 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -33,7 +33,7 @@ ) from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.util.caches.descriptors import _CachedFunction +from synapse.util.caches.descriptors import CachedFunction from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -269,9 +269,7 @@ async def invalidate_cache_and_stream( return cache_func.invalidate(keys) - await self.db_pool.runInteraction( - "invalidate_cache_and_stream", - self._send_invalidation_to_replication, + await self.send_invalidation_to_replication( cache_func.__name__, keys, ) @@ -279,7 +277,7 @@ async def invalidate_cache_and_stream( def _invalidate_cache_and_stream( self, txn: LoggingTransaction, - cache_func: _CachedFunction, 
+ cache_func: CachedFunction, keys: Tuple[Any, ...], ) -> None: """Invalidates the cache and adds it to the cache stream so slaves @@ -293,7 +291,7 @@ def _invalidate_cache_and_stream( self._send_invalidation_to_replication(txn, cache_func.__name__, keys) def _invalidate_all_cache_and_stream( - self, txn: LoggingTransaction, cache_func: _CachedFunction + self, txn: LoggingTransaction, cache_func: CachedFunction ) -> None: """Invalidates the entire cache and adds it to the cache stream so slaves will know to invalidate their caches. @@ -334,6 +332,16 @@ def _invalidate_state_caches_and_stream( txn, CURRENT_STATE_CACHE_NAME, [room_id] ) + async def send_invalidation_to_replication( + self, cache_name: str, keys: Optional[Collection[Any]] + ) -> None: + await self.db_pool.runInteraction( + "send_invalidation_to_replication", + self._send_invalidation_to_replication, + cache_name, + keys, + ) + def _send_invalidation_to_replication( self, txn: LoggingTransaction, cache_name: str, keys: Optional[Iterable[Any]] ) -> None: diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 16d1558925a4..11bdeb2a4448 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -88,7 +88,7 @@ import attr -from synapse.api.constants import ReceiptTypes +from synapse.api.constants import MAIN_TIMELINE, ReceiptTypes from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause from synapse.storage.database import ( @@ -437,8 +437,13 @@ def _get_unread_counts_by_pos_txn( and threads. """ - counts = NotifCounts() - thread_counts = {} + main_counts = NotifCounts() + thread_counts: Dict[str, NotifCounts] = {} + + def _get_thread(thread_id: str) -> NotifCounts: + if thread_id == MAIN_TIMELINE: + return main_counts + return thread_counts.setdefault(thread_id, NotifCounts()) receipt_types_clause, receipts_args = make_in_list_sql_clause( self.database_engine, @@ -485,7 +490,7 @@ def _get_unread_counts_by_pos_txn( AND ( (last_receipt_stream_ordering IS NULL AND stream_ordering > {receipt_stream_clause}) OR last_receipt_stream_ordering = {receipt_stream_clause} - ) + ) AND (notif_count != 0 OR COALESCE(unread_count, 0) != 0) """, ( user_id, @@ -500,15 +505,9 @@ def _get_unread_counts_by_pos_txn( summarised_threads = set() for notif_count, unread_count, thread_id in txn: summarised_threads.add(thread_id) - if thread_id == "main": - counts = NotifCounts( - notify_count=notif_count, unread_count=unread_count - ) - # TODO Delete zeroed out threads completely from the database. 
- elif notif_count or unread_count: - thread_counts[thread_id] = NotifCounts( - notify_count=notif_count, unread_count=unread_count - ) + counts = _get_thread(thread_id) + counts.notify_count += notif_count + counts.unread_count += unread_count # Next we need to count highlights, which aren't summarised sql = f""" @@ -541,15 +540,7 @@ def _get_unread_counts_by_pos_txn( ), ) for highlight_count, thread_id in txn: - if thread_id == "main": - counts.highlight_count += highlight_count - elif highlight_count: - if thread_id in thread_counts: - thread_counts[thread_id].highlight_count += highlight_count - else: - thread_counts[thread_id] = NotifCounts( - notify_count=0, unread_count=0, highlight_count=highlight_count - ) + _get_thread(thread_id).highlight_count += highlight_count # For threads which were summarised we need to count actions since the last # rotation. @@ -627,20 +618,11 @@ def _get_unread_counts_by_pos_txn( ), ) for notif_count, unread_count, thread_id in txn: - if thread_id == "main": - counts.notify_count += notif_count - counts.unread_count += unread_count - elif thread_id in thread_counts: - thread_counts[thread_id].notify_count += notif_count - thread_counts[thread_id].unread_count += unread_count - else: - thread_counts[thread_id] = NotifCounts( - notify_count=notif_count, - unread_count=unread_count, - highlight_count=0, - ) + counts = _get_thread(thread_id) + counts.notify_count += notif_count + counts.unread_count += unread_count - return RoomNotifCounts(counts, thread_counts) + return RoomNotifCounts(main_counts, thread_counts) def _get_notif_unread_count_for_user_room( self, @@ -1274,7 +1256,7 @@ def _handle_new_receipts_for_notifs_txn(self, txn: LoggingTransaction) -> bool: # Then any updated threads get their notification count and unread # count updated. - self.db_pool.simple_upsert_many_txn( + self.db_pool.simple_update_many_txn( txn, table="event_push_summary", key_names=("room_id", "user_id", "thread_id"), @@ -1511,7 +1493,10 @@ def _rotate_notifs_before_txn( ) async def _remove_old_push_actions_that_have_rotated(self) -> None: - """Clear out old push actions that have been summarised.""" + """ + Clear out old push actions that have been summarised (and are older than + 1 day ago). + """ # We want to clear out anything that is older than a day that *has* already # been rotated. 
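The refactor above funnels all three counting queries through one small helper: rows for the main timeline accumulate into a single counter, and rows for any other thread lazily create a per-thread counter. A standalone sketch of that accumulation pattern; `DemoCounts` and the sample rows are illustrative, and `MAIN_TIMELINE` is assumed to be the string `"main"` as suggested by the replaced literal:

```python
from dataclasses import dataclass
from typing import Dict

MAIN_TIMELINE = "main"  # mirrors the literal replaced by the new constant


@dataclass
class DemoCounts:
    notify_count: int = 0
    unread_count: int = 0
    highlight_count: int = 0


main_counts = DemoCounts()
thread_counts: Dict[str, DemoCounts] = {}


def _get_thread(thread_id: str) -> DemoCounts:
    if thread_id == MAIN_TIMELINE:
        return main_counts
    return thread_counts.setdefault(thread_id, DemoCounts())


# Rows as they might come back from the summary / push-action queries (made-up values):
for notif, unread, thread_id in [(2, 3, "main"), (1, 1, "$thread1"), (0, 2, "$thread1")]:
    counts = _get_thread(thread_id)
    counts.notify_count += notif
    counts.unread_count += unread

print(main_counts)                # DemoCounts(notify_count=2, unread_count=3, highlight_count=0)
print(thread_counts["$thread1"])  # DemoCounts(notify_count=1, unread_count=3, highlight_count=0)
```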
diff --git a/synapse/storage/databases/main/purge_events.py b/synapse/storage/databases/main/purge_events.py index f6822707e43c..9213ce0b5abb 100644 --- a/synapse/storage/databases/main/purge_events.py +++ b/synapse/storage/databases/main/purge_events.py @@ -419,6 +419,7 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: "event_forward_extremities", "event_push_actions", "event_search", + "event_failed_pull_attempts", "partial_state_events", "events", "federation_inbound_events_staging", @@ -441,6 +442,10 @@ def _purge_room_txn(self, txn: LoggingTransaction, room_id: str) -> List[int]: "e2e_room_keys", "event_push_summary", "pusher_throttle", + "insertion_events", + "insertion_event_extremities", + "insertion_event_edges", + "batch_events", "room_account_data", "room_tags", # "rooms" happens last, to keep the foreign keys in the other tables diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index 5079edd1e083..ed17b2e70c04 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -30,9 +30,8 @@ from synapse.api.errors import StoreError from synapse.config.homeserver import ExperimentalConfig -from synapse.push.baserules import FilteredPushRules, PushRule, compile_push_rules from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker -from synapse.storage._base import SQLBaseStore, db_to_json +from synapse.storage._base import SQLBaseStore from synapse.storage.database import ( DatabasePool, LoggingDatabaseConnection, @@ -51,6 +50,7 @@ IdGenerator, StreamIdGenerator, ) +from synapse.synapse_rust.push import FilteredPushRules, PushRule, PushRules from synapse.types import JsonDict from synapse.util import json_encoder from synapse.util.caches.descriptors import cached, cachedList @@ -72,18 +72,25 @@ def _load_rules( """ ruleslist = [ - PushRule( + PushRule.from_db( rule_id=rawrule["rule_id"], priority_class=rawrule["priority_class"], - conditions=db_to_json(rawrule["conditions"]), - actions=db_to_json(rawrule["actions"]), + conditions=rawrule["conditions"], + actions=rawrule["actions"], ) for rawrule in rawrules ] - push_rules = compile_push_rules(ruleslist) + push_rules = PushRules( + ruleslist, + ) - filtered_rules = FilteredPushRules(push_rules, enabled_map, experimental_config) + filtered_rules = FilteredPushRules( + push_rules, + enabled_map, + msc3786_enabled=experimental_config.msc3786_enabled, + msc3772_enabled=experimental_config.msc3772_enabled, + ) return filtered_rules @@ -845,7 +852,7 @@ async def copy_push_rules_from_room_to_room_for_user( user_push_rules = await self.get_push_rules_for_user(user_id) # Get rules relating to the old room and copy them to the new room - for rule, enabled in user_push_rules: + for rule, enabled in user_push_rules.rules(): if not enabled: continue diff --git a/synapse/storage/databases/main/pusher.py b/synapse/storage/databases/main/pusher.py index bd0cfa7f3211..01206950a9d4 100644 --- a/synapse/storage/databases/main/pusher.py +++ b/synapse/storage/databases/main/pusher.py @@ -89,6 +89,11 @@ def _decode_pushers_rows(self, rows: Iterable[dict]) -> Iterator[PusherConfig]: ) continue + # If we're using SQLite, then boolean values are integers. This is + # troublesome since some code using the return value of this method might + # expect it to be a boolean, or will expose it to clients (in responses). 
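In the `_load_rules` change above, the raw database rows (with JSON-encoded `conditions` and `actions`) are now handed straight to `PushRule.from_db`, then wrapped in `PushRules` and `FilteredPushRules` with the experimental flags passed as keyword arguments; the `db_to_json` decoding previously done in Python appears to happen inside the Rust constructor instead. A stand-in model of that shape, since the real classes live in the compiled `synapse.synapse_rust.push` module:

```python
import json
from typing import Dict, List


class DemoPushRule:
    """Stand-in for the Rust PushRule: from_db() accepts JSON-encoded fields."""

    def __init__(self, rule_id: str, priority_class: int, conditions: list, actions: list):
        self.rule_id, self.priority_class = rule_id, priority_class
        self.conditions, self.actions = conditions, actions

    @classmethod
    def from_db(cls, rule_id: str, priority_class: int, conditions: str, actions: str) -> "DemoPushRule":
        # The caller no longer pre-decodes the DB's JSON strings, so the
        # constructor does it here.
        return cls(rule_id, priority_class, json.loads(conditions), json.loads(actions))


def demo_load_rules(rawrules: List[Dict[str, object]]) -> List[DemoPushRule]:
    return [
        DemoPushRule.from_db(
            rule_id=r["rule_id"],
            priority_class=r["priority_class"],
            conditions=r["conditions"],
            actions=r["actions"],
        )
        for r in rawrules
    ]


rules = demo_load_rules(
    [{"rule_id": "personal.override.rule1", "priority_class": 5,
      "conditions": "[]", "actions": '["notify"]'}]
)
print(rules[0].rule_id, rules[0].actions)  # personal.override.rule1 ['notify']
```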
+ r["enabled"] = bool(r["enabled"]) + yield PusherConfig(**r) async def get_pushers_by_app_id_and_pushkey( @@ -100,38 +105,52 @@ async def get_pushers_by_user_id(self, user_id: str) -> Iterator[PusherConfig]: return await self.get_pushers_by({"user_name": user_id}) async def get_pushers_by(self, keyvalues: Dict[str, Any]) -> Iterator[PusherConfig]: - ret = await self.db_pool.simple_select_list( - "pushers", - keyvalues, - [ - "id", - "user_name", - "access_token", - "profile_tag", - "kind", - "app_id", - "app_display_name", - "device_display_name", - "pushkey", - "ts", - "lang", - "data", - "last_stream_ordering", - "last_success", - "failing_since", - ], + """Retrieve pushers that match the given criteria. + + Args: + keyvalues: A {column: value} dictionary. + + Returns: + The pushers for which the given columns have the given values. + """ + + def get_pushers_by_txn(txn: LoggingTransaction) -> List[Dict[str, Any]]: + # We could technically use simple_select_list here, but we need to call + # COALESCE on the 'enabled' column. While it is technically possible to give + # simple_select_list the whole `COALESCE(...) AS ...` as a column name, it + # feels a bit hacky, so it's probably better to just inline the query. + sql = """ + SELECT + id, user_name, access_token, profile_tag, kind, app_id, + app_display_name, device_display_name, pushkey, ts, lang, data, + last_stream_ordering, last_success, failing_since, + COALESCE(enabled, TRUE) AS enabled, device_id + FROM pushers + """ + + sql += "WHERE %s" % (" AND ".join("%s = ?" % (k,) for k in keyvalues),) + + txn.execute(sql, list(keyvalues.values())) + + return self.db_pool.cursor_to_dict(txn) + + ret = await self.db_pool.runInteraction( desc="get_pushers_by", + func=get_pushers_by_txn, ) + return self._decode_pushers_rows(ret) - async def get_all_pushers(self) -> Iterator[PusherConfig]: - def get_pushers(txn: LoggingTransaction) -> Iterator[PusherConfig]: - txn.execute("SELECT * FROM pushers") + async def get_enabled_pushers(self) -> Iterator[PusherConfig]: + def get_enabled_pushers_txn(txn: LoggingTransaction) -> Iterator[PusherConfig]: + txn.execute("SELECT * FROM pushers WHERE COALESCE(enabled, TRUE)") rows = self.db_pool.cursor_to_dict(txn) return self._decode_pushers_rows(rows) - return await self.db_pool.runInteraction("get_all_pushers", get_pushers) + return await self.db_pool.runInteraction( + "get_enabled_pushers", get_enabled_pushers_txn + ) async def get_all_updated_pushers_rows( self, instance_name: str, last_id: int, current_id: int, limit: int @@ -458,7 +477,74 @@ def _delete_pushers(txn: LoggingTransaction) -> int: return number_deleted -class PusherStore(PusherWorkerStore): +class PusherBackgroundUpdatesStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_update_handler( + "set_device_id_for_pushers", self._set_device_id_for_pushers + ) + + async def _set_device_id_for_pushers( + self, progress: JsonDict, batch_size: int + ) -> int: + """Background update to populate the device_id column of the pushers table.""" + last_pusher_id = progress.get("pusher_id", 0) + + def set_device_id_for_pushers_txn(txn: LoggingTransaction) -> int: + txn.execute( + """ + SELECT p.id, at.device_id + FROM pushers AS p + INNER JOIN access_tokens AS at + ON p.access_token = at.id + WHERE + p.access_token IS NOT NULL + AND at.device_id IS NOT NULL + AND p.id > ? 
+ ORDER BY p.id + LIMIT ? + """, + (last_pusher_id, batch_size), + ) + + rows = self.db_pool.cursor_to_dict(txn) + if len(rows) == 0: + return 0 + + self.db_pool.simple_update_many_txn( + txn=txn, + table="pushers", + key_names=("id",), + key_values=[(row["id"],) for row in rows], + value_names=("device_id",), + value_values=[(row["device_id"],) for row in rows], + ) + + self.db_pool.updates._background_update_progress_txn( + txn, "set_device_id_for_pushers", {"pusher_id": rows[-1]["id"]} + ) + + return len(rows) + + nb_processed = await self.db_pool.runInteraction( + "set_device_id_for_pushers", set_device_id_for_pushers_txn + ) + + if nb_processed < batch_size: + await self.db_pool.updates._end_background_update( + "set_device_id_for_pushers" + ) + + return nb_processed + + +class PusherStore(PusherWorkerStore, PusherBackgroundUpdatesStore): def get_pushers_stream_token(self) -> int: return self._pushers_id_gen.get_current_token() @@ -476,6 +562,8 @@ async def add_pusher( data: Optional[JsonDict], last_stream_ordering: int, profile_tag: str = "", + enabled: bool = True, + device_id: Optional[str] = None, ) -> None: async with self._pushers_id_gen.get_next() as stream_id: # no need to lock because `pushers` has a unique key on @@ -494,6 +582,8 @@ async def add_pusher( "last_stream_ordering": last_stream_ordering, "profile_tag": profile_tag, "id": stream_id, + "enabled": enabled, + "device_id": device_id, }, desc="add_pusher", lock=False, diff --git a/synapse/storage/databases/main/relations.py b/synapse/storage/databases/main/relations.py index 7bd27790ebfe..898947af9536 100644 --- a/synapse/storage/databases/main/relations.py +++ b/synapse/storage/databases/main/relations.py @@ -51,6 +51,8 @@ class _RelatedEvent: event_id: str # The sender of the related event. sender: str + topological_ordering: Optional[int] + stream_ordering: int class RelationsWorkerStore(SQLBaseStore): @@ -91,6 +93,9 @@ async def get_relations_for_event( # it. The `event_id` must match the `event.event_id`. assert event.event_id == event_id + # Ensure bad limits aren't being passed in. + assert limit >= 0 + where_clause = ["relates_to_id = ?", "room_id = ?"] where_args: List[Union[str, int]] = [event.event_id, room_id] is_redacted = event.internal_metadata.is_redacted() @@ -139,21 +144,34 @@ def _get_recent_references_for_event_txn( ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]: txn.execute(sql, where_args + [limit + 1]) - last_topo_id = None - last_stream_id = None events = [] - for row in txn: + for event_id, relation_type, sender, topo_ordering, stream_ordering in txn: # Do not include edits for redacted events as they leak event # content. - if not is_redacted or row[1] != RelationTypes.REPLACE: - events.append(_RelatedEvent(row[0], row[2])) - last_topo_id = row[3] - last_stream_id = row[4] + if not is_redacted or relation_type != RelationTypes.REPLACE: + events.append( + _RelatedEvent(event_id, sender, topo_ordering, stream_ordering) + ) - # If there are more events, generate the next pagination key. + # If there are more events, generate the next pagination key from the + # last event returned. next_token = None - if len(events) > limit and last_topo_id and last_stream_id: - next_key = RoomStreamToken(last_topo_id, last_stream_id) + if len(events) > limit: + # Instead of using the last row (which tells us there is more + # data), use the last row to be returned. 
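The `set_device_id_for_pushers` background update above follows the usual batched pattern: each pass selects up to `batch_size` pushers with an `id` greater than the last one processed, copies the device ID from the matching access token, records the last `id` as progress, and signals completion by returning fewer rows than the batch size. A self-contained SQLite sketch of that keyset-batching shape; the table layout mirrors the hunk, everything else is illustrative:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE access_tokens (id INTEGER PRIMARY KEY, device_id TEXT);
    CREATE TABLE pushers (id INTEGER PRIMARY KEY, access_token INTEGER, device_id TEXT);
    INSERT INTO access_tokens VALUES (1, 'DEV_A'), (2, 'DEV_B');
    INSERT INTO pushers (id, access_token) VALUES (10, 1), (11, 2), (12, NULL);
    """
)


def run_batch(last_pusher_id: int, batch_size: int):
    rows = conn.execute(
        """
        SELECT p.id, at.device_id
        FROM pushers AS p
        INNER JOIN access_tokens AS at ON p.access_token = at.id
        WHERE p.access_token IS NOT NULL
          AND at.device_id IS NOT NULL
          AND p.id > ?
        ORDER BY p.id
        LIMIT ?
        """,
        (last_pusher_id, batch_size),
    ).fetchall()
    for pusher_id, device_id in rows:
        conn.execute("UPDATE pushers SET device_id = ? WHERE id = ?", (device_id, pusher_id))
    # The last processed id becomes the next batch's starting point.
    new_last = rows[-1][0] if rows else last_pusher_id
    return len(rows), new_last


last_id, done = 0, False
while not done:
    n, last_id = run_batch(last_id, batch_size=1)
    done = n < 1  # fewer rows than the batch size means nothing is left to do

print(conn.execute("SELECT id, device_id FROM pushers ORDER BY id").fetchall())
# [(10, 'DEV_A'), (11, 'DEV_B'), (12, None)]
```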
+ events = events[:limit] + + topo = events[-1].topological_ordering + token = events[-1].stream_ordering + if direction == "b": + # Tokens are positions between events. + # This token points *after* the last event in the chunk. + # We need it to point to the event before it in the chunk + # when we are going backwards so we subtract one from the + # stream part. + token -= 1 + next_key = RoomStreamToken(topo, token) + if from_token: next_token = from_token.copy_and_replace( StreamKeyType.ROOM, next_key diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 3f9bfaeac5cb..530f04e149b3 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -1334,15 +1334,15 @@ def _paginate_room_events_txn( if rows: topo = rows[-1].topological_ordering - toke = rows[-1].stream_ordering + token = rows[-1].stream_ordering if direction == "b": # Tokens are positions between events. # This token points *after* the last event in the chunk. # We need it to point to the event before it in the chunk # when we are going backwards so we subtract one from the # stream part. - toke -= 1 - next_token = RoomStreamToken(topo, toke) + token -= 1 + next_token = RoomStreamToken(topo, token) else: # TODO (erikj): We should work out what to do here instead. next_token = to_token if to_token else from_token diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index 2d6ac659b448..96a87494c7ef 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -83,6 +83,8 @@ event_push_summary, receipts_linearized, and receipts_graph. - Add table `event_failed_pull_attempts` to keep track when we fail to pull events over federation. + - Add indexes to various tables (`event_failed_pull_attempts`, `insertion_events`, + `batch_events`) to make it easy to delete all associated rows when purging a room. """ diff --git a/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql b/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql new file mode 100644 index 000000000000..dba3b4900b59 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/02add_pusher_enabled.sql @@ -0,0 +1,16 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +ALTER TABLE pushers ADD COLUMN enabled BOOLEAN; \ No newline at end of file diff --git a/synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql b/synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql new file mode 100644 index 000000000000..6d38bdd43034 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/02room_id_indexes_for_purging.sql @@ -0,0 +1,22 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
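The relations pagination change above builds the next token from the last event actually returned (after trimming the extra sentinel row fetched beyond `limit`) and, when paginating backwards, subtracts one from the stream position so the token points just before that event; the same convention already used by `_paginate_room_events_txn`, where the `toke` typo is also fixed. A self-contained sketch of the token arithmetic; `DemoToken` and the event tuples are illustrative stand-ins:

```python
from dataclasses import dataclass
from typing import List, Optional, Tuple


@dataclass(frozen=True)
class DemoToken:
    # Stand-in for RoomStreamToken: (topological ordering, stream ordering).
    topological: Optional[int]
    stream: int


def next_pagination_token(
    events: List[Tuple[int, int]], limit: int, direction: str
) -> Tuple[List[Tuple[int, int]], Optional[DemoToken]]:
    """events are (topological_ordering, stream_ordering) pairs; one extra row
    beyond `limit` is fetched purely to detect whether more data exists."""
    next_token = None
    if len(events) > limit:
        events = events[:limit]  # drop the sentinel row
        topo, stream = events[-1]
        if direction == "b":
            # Tokens are positions *between* events; when going backwards the
            # token must point just before the last event we returned.
            stream -= 1
        next_token = DemoToken(topo, stream)
    return events, next_token


chunk, token = next_pagination_token([(5, 50), (4, 43), (3, 41)], limit=2, direction="b")
print(chunk)  # [(5, 50), (4, 43)]
print(token)  # DemoToken(topological=4, stream=42)
```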
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add index so we can easily purge all rows from a given `room_id` +CREATE INDEX IF NOT EXISTS event_failed_pull_attempts_room_id ON event_failed_pull_attempts(room_id); + +-- MSC2716 related tables: +-- Add indexes so we can easily purge all rows from a given `room_id` +CREATE INDEX IF NOT EXISTS insertion_events_room_id ON insertion_events(room_id); +CREATE INDEX IF NOT EXISTS batch_events_room_id ON batch_events(room_id); diff --git a/synapse/storage/schema/main/delta/73/03pusher_device_id.sql b/synapse/storage/schema/main/delta/73/03pusher_device_id.sql new file mode 100644 index 000000000000..1b4ffbeebec0 --- /dev/null +++ b/synapse/storage/schema/main/delta/73/03pusher_device_id.sql @@ -0,0 +1,20 @@ +/* Copyright 2022 The Matrix.org Foundation C.I.C + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +-- Add a device_id column to track the device ID that created the pusher. It's NULLable +-- on purpose, because a) it might not be possible to track down the device that created +-- old pushers (pushers.access_token and access_tokens.device_id are both NULLable), and +-- b) access tokens retrieved via the admin API don't have a device associated to them. 
+ALTER TABLE pushers ADD COLUMN device_id TEXT; \ No newline at end of file diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 10aff4d04a97..3909f1caea24 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -53,7 +53,7 @@ F = TypeVar("F", bound=Callable[..., Any]) -class _CachedFunction(Generic[F]): +class CachedFunction(Generic[F]): invalidate: Any = None invalidate_all: Any = None prefill: Any = None @@ -242,7 +242,7 @@ def _wrapped(*args: Any, **kwargs: Any) -> Any: return ret2 - wrapped = cast(_CachedFunction, _wrapped) + wrapped = cast(CachedFunction, _wrapped) wrapped.cache = cache obj.__dict__[self.name] = wrapped @@ -363,7 +363,7 @@ def _wrapped(*args: Any, **kwargs: Any) -> Any: return make_deferred_yieldable(ret) - wrapped = cast(_CachedFunction, _wrapped) + wrapped = cast(CachedFunction, _wrapped) if self.num_args == 1: assert not self.tree @@ -572,7 +572,7 @@ def cached( iterable: bool = False, prune_unread_entries: bool = True, name: Optional[str] = None, -) -> Callable[[F], _CachedFunction[F]]: +) -> Callable[[F], CachedFunction[F]]: func = lambda orig: DeferredCacheDescriptor( orig, max_entries=max_entries, @@ -585,7 +585,7 @@ def cached( name=name, ) - return cast(Callable[[F], _CachedFunction[F]], func) + return cast(Callable[[F], CachedFunction[F]], func) def cachedList( @@ -594,7 +594,7 @@ def cachedList( list_name: str, num_args: Optional[int] = None, name: Optional[str] = None, -) -> Callable[[F], _CachedFunction[F]]: +) -> Callable[[F], CachedFunction[F]]: """Creates a descriptor that wraps a function in a `DeferredCacheListDescriptor`. Used to do batch lookups for an already created cache. One of the arguments @@ -631,7 +631,7 @@ def batch_do_something(self, first_arg, second_args): name=name, ) - return cast(Callable[[F], _CachedFunction[F]], func) + return cast(Callable[[F], CachedFunction[F]], func) def _get_cache_key_builder( diff --git a/tests/handlers/test_deactivate_account.py b/tests/handlers/test_deactivate_account.py index 7b9b711521d8..bce65fab7d84 100644 --- a/tests/handlers/test_deactivate_account.py +++ b/tests/handlers/test_deactivate_account.py @@ -15,11 +15,11 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.constants import AccountDataTypes -from synapse.push.baserules import PushRule from synapse.push.rulekinds import PRIORITY_CLASS_MAP from synapse.rest import admin from synapse.rest.client import account, login from synapse.server import HomeServer +from synapse.synapse_rust.push import PushRule from synapse.util import Clock from tests.unittest import HomeserverTestCase @@ -161,20 +161,15 @@ def test_push_rules_deleted_upon_account_deactivation(self) -> None: self._store.get_push_rules_for_user(self.user) ) # Filter out default rules; we don't care - push_rules = [r for r, _ in filtered_push_rules if self._is_custom_rule(r)] + push_rules = [ + r for r, _ in filtered_push_rules.rules() if self._is_custom_rule(r) + ] # Check our rule made it - self.assertEqual( - push_rules, - [ - PushRule( - rule_id="personal.override.rule1", - priority_class=5, - conditions=[], - actions=[], - ) - ], - push_rules, - ) + self.assertEqual(len(push_rules), 1) + self.assertEqual(push_rules[0].rule_id, "personal.override.rule1") + self.assertEqual(push_rules[0].priority_class, 5) + self.assertEqual(push_rules[0].conditions, []) + self.assertEqual(push_rules[0].actions, []) # Request the deactivation of our account self._deactivate_my_account() @@ -183,7 +178,9 @@ 
def test_push_rules_deleted_upon_account_deactivation(self) -> None: self._store.get_push_rules_for_user(self.user) ) # Filter out default rules; we don't care - push_rules = [r for r, _ in filtered_push_rules if self._is_custom_rule(r)] + push_rules = [ + r for r, _ in filtered_push_rules.rules() if self._is_custom_rule(r) + ] # Check our rule no longer exists self.assertEqual(push_rules, [], push_rules) diff --git a/tests/handlers/test_federation_event.py b/tests/handlers/test_federation_event.py index b5b89405a4f2..918010cddbf9 100644 --- a/tests/handlers/test_federation_event.py +++ b/tests/handlers/test_federation_event.py @@ -11,14 +11,23 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional from unittest import mock +from synapse.api.errors import AuthError +from synapse.api.room_versions import RoomVersion +from synapse.event_auth import ( + check_state_dependent_auth_rules, + check_state_independent_auth_rules, +) from synapse.events import make_event_from_dict from synapse.events.snapshot import EventContext from synapse.federation.transport.client import StateRequestResponse from synapse.logging.context import LoggingContext from synapse.rest import admin from synapse.rest.client import login, room +from synapse.state.v2 import _mainline_sort, _reverse_topological_power_sort +from synapse.types import JsonDict from tests import unittest from tests.test_utils import event_injection, make_awaitable @@ -449,3 +458,393 @@ def test_process_pulled_event_clears_backfill_attempts_after_being_successfully_ main_store.get_event(pulled_event.event_id, allow_none=True) ) self.assertIsNotNone(persisted, "pulled event was not persisted at all") + + def test_process_pulled_event_with_rejected_missing_state(self) -> None: + """Ensure that we correctly handle pulled events with missing state containing a + rejected state event + + In this test, we pretend we are processing a "pulled" event (eg, via backfill + or get_missing_events). The pulled event has a prev_event we haven't previously + seen, so the server requests the state at that prev_event. We expect the server + to make a /state request. + + We simulate a remote server whose /state includes a rejected kick event for a + local user. Notably, the kick event is rejected only because it cites a rejected + auth event and would otherwise be accepted based on the room state. During state + resolution, we re-run auth and can potentially introduce such rejected events + into the state if we are not careful. + + We check that the pulled event is correctly persisted, and that the state + afterwards does not include the rejected kick. + """ + # The DAG we are testing looks like: + # + # ... + # | + # v + # remote admin user joins + # | | + # +-------+ +-------+ + # | | + # | rejected power levels + # | from remote server + # | | + # | v + # | rejected kick of local user + # v from remote server + # new power levels | + # | v + # | missing event + # | from remote server + # | | + # +-------+ +-------+ + # | | + # v v + # pulled event + # from remote server + # + # (arrows are in the opposite direction to prev_events.) + + OTHER_USER = f"@user:{self.OTHER_SERVER_NAME}" + main_store = self.hs.get_datastores().main + + # Create the room. 
+ kermit_user_id = self.register_user("kermit", "test") + kermit_tok = self.login("kermit", "test") + room_id = self.helper.create_room_as( + room_creator=kermit_user_id, tok=kermit_tok + ) + room_version = self.get_success(main_store.get_room_version(room_id)) + + # Add another local user to the room. This user is going to be kicked in a + # rejected event. + bert_user_id = self.register_user("bert", "test") + bert_tok = self.login("bert", "test") + self.helper.join(room_id, user=bert_user_id, tok=bert_tok) + + # Allow the remote user to kick bert. + # The remote user is going to send a rejected power levels event later on and we + # need state resolution to order it before another power levels event kermit is + # going to send later on. Hence we give both users the same power level, so that + # ties are broken by `origin_server_ts`. + self.helper.send_state( + room_id, + "m.room.power_levels", + {"users": {kermit_user_id: 100, OTHER_USER: 100}}, + tok=kermit_tok, + ) + + # Add the remote user to the room. + other_member_event = self.get_success( + event_injection.inject_member_event(self.hs, room_id, OTHER_USER, "join") + ) + + initial_state_map = self.get_success( + main_store.get_partial_current_state_ids(room_id) + ) + create_event = self.get_success( + main_store.get_event(initial_state_map[("m.room.create", "")]) + ) + bert_member_event = self.get_success( + main_store.get_event(initial_state_map[("m.room.member", bert_user_id)]) + ) + power_levels_event = self.get_success( + main_store.get_event(initial_state_map[("m.room.power_levels", "")]) + ) + + # We now need a rejected state event that will fail + # `check_state_independent_auth_rules` but pass + # `check_state_dependent_auth_rules`. + + # First, we create a power levels event that we pretend the remote server has + # accepted, but the local homeserver will reject. + next_depth = 100 + next_timestamp = other_member_event.origin_server_ts + 100 + rejected_power_levels_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "m.room.power_levels", + "state_key": "", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [other_member_event.event_id], + "auth_events": [ + initial_state_map[("m.room.create", "")], + initial_state_map[("m.room.power_levels", "")], + # The event will be rejected because of the duplicated auth + # event. + other_member_event.event_id, + other_member_event.event_id, + ], + "origin_server_ts": next_timestamp, + "depth": next_depth, + "content": power_levels_event.content, + } + ), + room_version, + ) + next_depth += 1 + next_timestamp += 100 + + with LoggingContext("send_rejected_power_levels_event"): + self.get_success( + self.hs.get_federation_event_handler()._process_pulled_event( + self.OTHER_SERVER_NAME, + rejected_power_levels_event, + backfilled=False, + ) + ) + self.assertEqual( + self.get_success( + main_store.get_rejection_reason( + rejected_power_levels_event.event_id + ) + ), + "auth_error", + ) + + # Then we create a kick event for a local user that cites the rejected power + # levels event in its auth events. The kick event will be rejected solely + # because of the rejected auth event and would otherwise be accepted. 
+        rejected_kick_event = make_event_from_dict(
+            self.add_hashes_and_signatures_from_other_server(
+                {
+                    "type": "m.room.member",
+                    "state_key": bert_user_id,
+                    "room_id": room_id,
+                    "sender": OTHER_USER,
+                    "prev_events": [rejected_power_levels_event.event_id],
+                    "auth_events": [
+                        initial_state_map[("m.room.create", "")],
+                        rejected_power_levels_event.event_id,
+                        initial_state_map[("m.room.member", bert_user_id)],
+                        initial_state_map[("m.room.member", OTHER_USER)],
+                    ],
+                    "origin_server_ts": next_timestamp,
+                    "depth": next_depth,
+                    "content": {"membership": "leave"},
+                }
+            ),
+            room_version,
+        )
+        next_depth += 1
+        next_timestamp += 100
+
+        # The kick event must fail the state-independent auth rules, but pass the
+        # state-dependent auth rules, so that it has a chance of making it through state
+        # resolution.
+        self.get_failure(
+            check_state_independent_auth_rules(main_store, rejected_kick_event),
+            AuthError,
+        )
+        check_state_dependent_auth_rules(
+            rejected_kick_event,
+            [create_event, power_levels_event, other_member_event, bert_member_event],
+        )
+
+        # The kick event must also win over the original member event during state
+        # resolution.
+        self.assertEqual(
+            self.get_success(
+                _mainline_sort(
+                    self.clock,
+                    room_id,
+                    event_ids=[
+                        bert_member_event.event_id,
+                        rejected_kick_event.event_id,
+                    ],
+                    resolved_power_event_id=power_levels_event.event_id,
+                    event_map={
+                        bert_member_event.event_id: bert_member_event,
+                        rejected_kick_event.event_id: rejected_kick_event,
+                    },
+                    state_res_store=main_store,
+                )
+            ),
+            [bert_member_event.event_id, rejected_kick_event.event_id],
+            "The rejected kick event will not be applied after bert's join event "
+            "during state resolution. The test setup is incorrect.",
+        )
+
+        with LoggingContext("send_rejected_kick_event"):
+            self.get_success(
+                self.hs.get_federation_event_handler()._process_pulled_event(
+                    self.OTHER_SERVER_NAME, rejected_kick_event, backfilled=False
+                )
+            )
+        self.assertEqual(
+            self.get_success(
+                main_store.get_rejection_reason(rejected_kick_event.event_id)
+            ),
+            "auth_error",
+        )
+
+        # We need another power levels event which will win over the rejected one
+        # during state resolution, otherwise we hit other issues where we end up with
+        # a rejected power levels event during state resolution.
+        self.reactor.advance(100)  # ensure the `origin_server_ts` is larger
+        new_power_levels_event = self.get_success(
+            main_store.get_event(
+                self.helper.send_state(
+                    room_id,
+                    "m.room.power_levels",
+                    {"users": {kermit_user_id: 100, OTHER_USER: 100, bert_user_id: 1}},
+                    tok=kermit_tok,
+                )["event_id"]
+            )
+        )
+        self.assertEqual(
+            self.get_success(
+                _reverse_topological_power_sort(
+                    self.clock,
+                    room_id,
+                    event_ids=[
+                        new_power_levels_event.event_id,
+                        rejected_power_levels_event.event_id,
+                    ],
+                    event_map={},
+                    state_res_store=main_store,
+                    full_conflicted_set=set(),
+                )
+            ),
+            [rejected_power_levels_event.event_id, new_power_levels_event.event_id],
+            "The power levels events will not have the desired ordering during state "
+            "resolution. The test setup is incorrect.",
+        )
+
+        # Create a missing event, so that the local homeserver has to do a `/state` or
+        # `/state_ids` request to pull state from the remote homeserver.
+ missing_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "m.room.message", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [rejected_kick_event.event_id], + "auth_events": [ + initial_state_map[("m.room.create", "")], + initial_state_map[("m.room.power_levels", "")], + initial_state_map[("m.room.member", OTHER_USER)], + ], + "origin_server_ts": next_timestamp, + "depth": next_depth, + "content": {"msgtype": "m.text", "body": "foo"}, + } + ), + room_version, + ) + next_depth += 1 + next_timestamp += 100 + + # The pulled event has two prev events, one of which is missing. We will make a + # `/state` or `/state_ids` request to the remote homeserver to ask it for the + # state before the missing prev event. + pulled_event = make_event_from_dict( + self.add_hashes_and_signatures_from_other_server( + { + "type": "m.room.message", + "room_id": room_id, + "sender": OTHER_USER, + "prev_events": [ + new_power_levels_event.event_id, + missing_event.event_id, + ], + "auth_events": [ + initial_state_map[("m.room.create", "")], + new_power_levels_event.event_id, + initial_state_map[("m.room.member", OTHER_USER)], + ], + "origin_server_ts": next_timestamp, + "depth": next_depth, + "content": {"msgtype": "m.text", "body": "bar"}, + } + ), + room_version, + ) + next_depth += 1 + next_timestamp += 100 + + # Prepare the response for the `/state` or `/state_ids` request. + # The remote server believes bert has been kicked, while the local server does + # not. + state_before_missing_event = self.get_success( + main_store.get_events_as_list(initial_state_map.values()) + ) + state_before_missing_event = [ + event + for event in state_before_missing_event + if event.event_id != bert_member_event.event_id + ] + state_before_missing_event.append(rejected_kick_event) + + # We have to bump the clock a bit, to keep the retry logic in + # `FederationClient.get_pdu` happy + self.reactor.advance(60000) + with LoggingContext("send_pulled_event"): + + async def get_event( + destination: str, event_id: str, timeout: Optional[int] = None + ) -> JsonDict: + self.assertEqual(destination, self.OTHER_SERVER_NAME) + self.assertEqual(event_id, missing_event.event_id) + return {"pdus": [missing_event.get_pdu_json()]} + + async def get_room_state_ids( + destination: str, room_id: str, event_id: str + ) -> JsonDict: + self.assertEqual(destination, self.OTHER_SERVER_NAME) + self.assertEqual(event_id, missing_event.event_id) + return { + "pdu_ids": [event.event_id for event in state_before_missing_event], + "auth_chain_ids": [], + } + + async def get_room_state( + room_version: RoomVersion, destination: str, room_id: str, event_id: str + ) -> StateRequestResponse: + self.assertEqual(destination, self.OTHER_SERVER_NAME) + self.assertEqual(event_id, missing_event.event_id) + return StateRequestResponse( + state=state_before_missing_event, + auth_events=[], + ) + + self.mock_federation_transport_client.get_event.side_effect = get_event + self.mock_federation_transport_client.get_room_state_ids.side_effect = ( + get_room_state_ids + ) + self.mock_federation_transport_client.get_room_state.side_effect = ( + get_room_state + ) + + self.get_success( + self.hs.get_federation_event_handler()._process_pulled_event( + self.OTHER_SERVER_NAME, pulled_event, backfilled=False + ) + ) + self.assertIsNone( + self.get_success( + main_store.get_rejection_reason(pulled_event.event_id) + ), + "Pulled event was unexpectedly rejected, likely due to a problem with " + "the test setup.", + ) + 
self.assertEqual( + {pulled_event.event_id}, + self.get_success( + main_store.have_events_in_timeline([pulled_event.event_id]) + ), + "Pulled event was not persisted, likely due to a problem with the test " + "setup.", + ) + + # We must not accept rejected events into the room state, so we expect bert + # to not be kicked, even if the remote server believes so. + new_state_map = self.get_success( + main_store.get_partial_current_state_ids(room_id) + ) + self.assertEqual( + new_state_map[("m.room.member", bert_user_id)], + bert_member_event.event_id, + "Rejected kick event unexpectedly became part of room state.", + ) diff --git a/tests/push/test_email.py b/tests/push/test_email.py index 7a3b0d675592..fd14568f55ce 100644 --- a/tests/push/test_email.py +++ b/tests/push/test_email.py @@ -114,7 +114,7 @@ def prepare(self, reactor, clock, hs): ) self.pusher = self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, access_token=self.token_id, kind="email", @@ -136,7 +136,7 @@ def test_need_validated_email(self): """ with self.assertRaises(SynapseError) as cm: self.get_success_or_raise( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=self.user_id, access_token=self.token_id, kind="email", diff --git a/tests/push/test_http.py b/tests/push/test_http.py index d9c68cdd2d22..b383b8401f62 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -19,9 +19,10 @@ import synapse.rest.admin from synapse.logging.context import make_deferred_yieldable -from synapse.push import PusherConfigException -from synapse.rest.client import login, push_rule, receipts, room +from synapse.push import PusherConfig, PusherConfigException +from synapse.rest.client import login, push_rule, pusher, receipts, room from synapse.server import HomeServer +from synapse.storage.databases.main.registration import TokenLookupResult from synapse.types import JsonDict from synapse.util import Clock @@ -35,6 +36,7 @@ class HTTPPusherTests(HomeserverTestCase): login.register_servlets, receipts.register_servlets, push_rule.register_servlets, + pusher.register_servlets, ] user_id = True hijack_auth = False @@ -74,7 +76,7 @@ def test_invalid_configuration(self) -> None: def test_data(data: Optional[JsonDict]) -> None: self.get_failure( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ -119,7 +121,7 @@ def test_sends_http(self) -> None: token_id = user_tuple.token_id self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ -235,7 +237,7 @@ def test_sends_high_priority_for_encrypted(self) -> None: token_id = user_tuple.token_id self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ -355,7 +357,7 @@ def test_sends_high_priority_for_one_to_one_only(self) -> None: token_id = user_tuple.token_id self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ -441,7 +443,7 @@ def test_sends_high_priority_for_mention(self) -> None: token_id = user_tuple.token_id self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ 
-518,7 +520,7 @@ def test_sends_high_priority_for_atroom(self) -> None: token_id = user_tuple.token_id self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ -624,7 +626,7 @@ def _test_push_unread_count(self) -> None: token_id = user_tuple.token_id self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ -728,18 +730,38 @@ def _send_read_request( ) self.assertEqual(channel.code, 200, channel.json_body) - def _make_user_with_pusher(self, username: str) -> Tuple[str, str]: + def _make_user_with_pusher( + self, username: str, enabled: bool = True + ) -> Tuple[str, str]: + """Registers a user and creates a pusher for them. + + Args: + username: the localpart of the new user's Matrix ID. + enabled: whether to create the pusher in an enabled or disabled state. + """ user_id = self.register_user(username, "pass") access_token = self.login(username, "pass") # Register the pusher + self._set_pusher(user_id, access_token, enabled) + + return user_id, access_token + + def _set_pusher(self, user_id: str, access_token: str, enabled: bool) -> None: + """Creates or updates the pusher for the given user. + + Args: + user_id: the user's Matrix ID. + access_token: the access token associated with the pusher. + enabled: whether to enable or disable the pusher. + """ user_tuple = self.get_success( self.hs.get_datastores().main.get_user_by_access_token(access_token) ) token_id = user_tuple.token_id self.get_success( - self.hs.get_pusherpool().add_pusher( + self.hs.get_pusherpool().add_or_update_pusher( user_id=user_id, access_token=token_id, kind="http", @@ -749,11 +771,11 @@ def _make_user_with_pusher(self, username: str) -> Tuple[str, str]: pushkey="a@example.com", lang=None, data={"url": "http://example.com/_matrix/push/v1/notify"}, + enabled=enabled, + device_id=user_tuple.device_id, ) ) - return user_id, access_token - def test_dont_notify_rule_overrides_message(self) -> None: """ The override push rule will suppress notification @@ -791,3 +813,148 @@ def test_dont_notify_rule_overrides_message(self) -> None: # The user sends a message back (sends a notification) self.helper.send(room, body="Hello", tok=access_token) self.assertEqual(len(self.push_attempts), 1) + + @override_config({"experimental_features": {"msc3881_enabled": True}}) + def test_disable(self) -> None: + """Tests that disabling a pusher means it's not pushed to anymore.""" + user_id, access_token = self._make_user_with_pusher("user") + other_user_id, other_access_token = self._make_user_with_pusher("otheruser") + + room = self.helper.create_room_as(user_id, tok=access_token) + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # Send a message and check that it generated a push. + self.helper.send(room, body="Hi!", tok=other_access_token) + self.assertEqual(len(self.push_attempts), 1) + + # Disable the pusher. + self._set_pusher(user_id, access_token, enabled=False) + + # Send another message and check that it did not generate a push. + self.helper.send(room, body="Hi!", tok=other_access_token) + self.assertEqual(len(self.push_attempts), 1) + + # Get the pushers for the user and check that it is marked as disabled. 
+        channel = self.make_request("GET", "/pushers", access_token=access_token)
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(len(channel.json_body["pushers"]), 1)
+
+        enabled = channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"]
+        self.assertFalse(enabled)
+        self.assertTrue(isinstance(enabled, bool))
+
+    @override_config({"experimental_features": {"msc3881_enabled": True}})
+    def test_enable(self) -> None:
+        """Tests that enabling a disabled pusher means it gets pushed to."""
+        # Create the user with the pusher already disabled.
+        user_id, access_token = self._make_user_with_pusher("user", enabled=False)
+        other_user_id, other_access_token = self._make_user_with_pusher("otheruser")
+
+        room = self.helper.create_room_as(user_id, tok=access_token)
+        self.helper.join(room=room, user=other_user_id, tok=other_access_token)
+
+        # Send a message and check that it did not generate a push.
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.assertEqual(len(self.push_attempts), 0)
+
+        # Enable the pusher.
+        self._set_pusher(user_id, access_token, enabled=True)
+
+        # Send another message and check that it did generate a push.
+        self.helper.send(room, body="Hi!", tok=other_access_token)
+        self.assertEqual(len(self.push_attempts), 1)
+
+        # Get the pushers for the user and check that it is marked as enabled.
+        channel = self.make_request("GET", "/pushers", access_token=access_token)
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(len(channel.json_body["pushers"]), 1)
+
+        enabled = channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"]
+        self.assertTrue(enabled)
+        self.assertTrue(isinstance(enabled, bool))
+
+    @override_config({"experimental_features": {"msc3881_enabled": True}})
+    def test_null_enabled(self) -> None:
+        """Tests that a pusher that has an 'enabled' column set to NULL (e.g. pushers
+        created before the column was introduced) is considered enabled.
+        """
+        # We intentionally set 'enabled' to None so that it's stored as NULL in the
+        # database.
+        user_id, access_token = self._make_user_with_pusher("user", enabled=None)  # type: ignore[arg-type]
+
+        channel = self.make_request("GET", "/pushers", access_token=access_token)
+        self.assertEqual(channel.code, 200)
+        self.assertEqual(len(channel.json_body["pushers"]), 1)
+        self.assertTrue(channel.json_body["pushers"][0]["org.matrix.msc3881.enabled"])
+
+    def test_update_different_device_access_token_device_id(self) -> None:
+        """Tests that if we create a pusher from one device, then update it from
+        another device, the access token and device ID associated with the pusher
+        stay the same.
+        """
+        # Create a user with a pusher.
+        user_id, access_token = self._make_user_with_pusher("user")
+
+        # Get the token ID for the current access token, since that's what we store in
+        # the pushers table. Also get the device ID from it.
+        user_tuple = self.get_success(
+            self.hs.get_datastores().main.get_user_by_access_token(access_token)
+        )
+        token_id = user_tuple.token_id
+        device_id = user_tuple.device_id
+
+        # Generate a new access token, and update the pusher with it.
+        new_token = self.login("user", "pass")
+        self._set_pusher(user_id, new_token, enabled=False)
+
+        # Get the current list of pushers for the user.
+        ret = self.get_success(
+            self.hs.get_datastores().main.get_pushers_by({"user_name": user_id})
+        )
+        pushers: List[PusherConfig] = list(ret)
+
+        # Check that we still have one pusher, and that the access token and device ID
+        # associated with it didn't change.
+ self.assertEqual(len(pushers), 1) + self.assertEqual(pushers[0].access_token, token_id) + self.assertEqual(pushers[0].device_id, device_id) + + @override_config({"experimental_features": {"msc3881_enabled": True}}) + def test_device_id(self) -> None: + """Tests that a pusher created with a given device ID shows that device ID in + GET /pushers requests. + """ + self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # We create the pusher with an HTTP request rather than with + # _make_user_with_pusher so that we can test the device ID is correctly set when + # creating a pusher via an API call. + self.make_request( + method="POST", + path="/pushers/set", + content={ + "kind": "http", + "app_id": "m.http", + "app_display_name": "HTTP Push Notifications", + "device_display_name": "pushy push", + "pushkey": "a@example.com", + "lang": "en", + "data": {"url": "http://example.com/_matrix/push/v1/notify"}, + }, + access_token=access_token, + ) + + # Look up the user info for the access token so we can compare the device ID. + lookup_result: TokenLookupResult = self.get_success( + self.hs.get_datastores().main.get_user_by_access_token(access_token) + ) + + # Get the user's devices and check it has the correct device ID. + channel = self.make_request("GET", "/pushers", access_token=access_token) + self.assertEqual(channel.code, 200) + self.assertEqual(len(channel.json_body["pushers"]), 1) + self.assertEqual( + channel.json_body["pushers"][0]["org.matrix.msc3881.device_id"], + lookup_result.device_id, + ) diff --git a/tests/replication/test_module_cache_invalidation.py b/tests/replication/test_module_cache_invalidation.py new file mode 100644 index 000000000000..b93cae67d3c4 --- /dev/null +++ b/tests/replication/test_module_cache_invalidation.py @@ -0,0 +1,79 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import logging
+
+import synapse
+from synapse.module_api import cached
+
+from tests.replication._base import BaseMultiWorkerStreamTestCase
+
+logger = logging.getLogger(__name__)
+
+FIRST_VALUE = "one"
+SECOND_VALUE = "two"
+
+KEY = "mykey"
+
+
+class TestCache:
+    current_value = FIRST_VALUE
+
+    @cached()
+    async def cached_function(self, user_id: str) -> str:
+        return self.current_value
+
+
+class ModuleCacheInvalidationTestCase(BaseMultiWorkerStreamTestCase):
+    servlets = [
+        synapse.rest.admin.register_servlets,
+    ]
+
+    def test_module_cache_full_invalidation(self):
+        main_cache = TestCache()
+        self.hs.get_module_api().register_cached_function(main_cache.cached_function)
+
+        worker_hs = self.make_worker_hs("synapse.app.generic_worker")
+
+        worker_cache = TestCache()
+        worker_hs.get_module_api().register_cached_function(
+            worker_cache.cached_function
+        )
+
+        self.assertEqual(FIRST_VALUE, self.get_success(main_cache.cached_function(KEY)))
+        self.assertEqual(
+            FIRST_VALUE, self.get_success(worker_cache.cached_function(KEY))
+        )
+
+        main_cache.current_value = SECOND_VALUE
+        worker_cache.current_value = SECOND_VALUE
+        # No invalidation yet: both the main process and the worker should still
+        # return the cached value.
+        self.assertEqual(FIRST_VALUE, self.get_success(main_cache.cached_function(KEY)))
+        self.assertEqual(
+            FIRST_VALUE, self.get_success(worker_cache.cached_function(KEY))
+        )
+
+        # Full invalidation on the main process. This should be replicated to the
+        # worker, which should then return the updated value too.
+        self.get_success(
+            self.hs.get_module_api().invalidate_cache(
+                main_cache.cached_function, (KEY,)
+            )
+        )
+
+        self.assertEqual(
+            SECOND_VALUE, self.get_success(main_cache.cached_function(KEY))
+        )
+        self.assertEqual(
+            SECOND_VALUE, self.get_success(worker_cache.cached_function(KEY))
+        )
diff --git a/tests/replication/test_pusher_shard.py b/tests/replication/test_pusher_shard.py
index 8f4f6688ce86..59fea93e490e 100644
--- a/tests/replication/test_pusher_shard.py
+++ b/tests/replication/test_pusher_shard.py
@@ -55,7 +55,7 @@ def _create_pusher_and_send_msg(self, localpart):
         token_id = user_dict.token_id
 
         self.get_success(
-            self.hs.get_pusherpool().add_pusher(
+            self.hs.get_pusherpool().add_or_update_pusher(
                 user_id=user_id,
                 access_token=token_id,
                 kind="http",
diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py
index ec5ccf6fcad0..1847e6ad6ba4 100644
--- a/tests/rest/admin/test_user.py
+++ b/tests/rest/admin/test_user.py
@@ -2839,7 +2839,7 @@ def test_get_pushers(self) -> None:
         token_id = user_tuple.token_id
 
         self.get_success(
-            self.hs.get_pusherpool().add_pusher(
+            self.hs.get_pusherpool().add_or_update_pusher(
                 user_id=self.other_user,
                 access_token=token_id,
                 kind="http",
@@ -4140,3 +4140,90 @@ def test_success(self) -> None:
             {"b": 2},
             channel.json_body["account_data"]["rooms"]["test_room"]["m.per_room"],
         )
+
+
+class UsersByExternalIdTestCase(unittest.HomeserverTestCase):
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+        self.admin_user = self.register_user("admin", "pass", admin=True)
+        self.admin_user_tok = self.login("admin", "pass")
+
+        self.other_user = self.register_user("user", "pass")
+        self.get_success(
+            self.store.record_user_external_id(
+                "the-auth-provider", "the-external-id", self.other_user
+            )
+        )
+        self.get_success(
+            self.store.record_user_external_id(
"another-auth-provider", "a:complex@external/id", self.other_user + ) + ) + + def test_no_auth(self) -> None: + """Try to lookup a user without authentication.""" + url = ( + "/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id" + ) + + channel = self.make_request( + "GET", + url, + ) + + self.assertEqual(401, channel.code, msg=channel.json_body) + self.assertEqual(Codes.MISSING_TOKEN, channel.json_body["errcode"]) + + def test_binding_does_not_exist(self) -> None: + """Tests that a lookup for an external ID that does not exist returns a 404""" + url = "/_synapse/admin/v1/auth_providers/the-auth-provider/users/unknown-id" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(404, channel.code, msg=channel.json_body) + self.assertEqual(Codes.NOT_FOUND, channel.json_body["errcode"]) + + def test_success(self) -> None: + """Tests a successful external ID lookup""" + url = ( + "/_synapse/admin/v1/auth_providers/the-auth-provider/users/the-external-id" + ) + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + {"user_id": self.other_user}, + channel.json_body, + ) + + def test_success_urlencoded(self) -> None: + """Tests a successful external ID lookup with an url-encoded ID""" + url = "/_synapse/admin/v1/auth_providers/another-auth-provider/users/a%3Acomplex%40external%2Fid" + + channel = self.make_request( + "GET", + url, + access_token=self.admin_user_tok, + ) + + self.assertEqual(200, channel.code, msg=channel.json_body) + self.assertEqual( + {"user_id": self.other_user}, + channel.json_body, + ) diff --git a/tests/rest/client/test_login_token_request.py b/tests/rest/client/test_login_token_request.py new file mode 100644 index 000000000000..d5bb16c98daa --- /dev/null +++ b/tests/rest/client/test_login_token_request.py @@ -0,0 +1,132 @@ +# Copyright 2022 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from twisted.test.proto_helpers import MemoryReactor + +from synapse.rest import admin +from synapse.rest.client import login, login_token_request +from synapse.server import HomeServer +from synapse.util import Clock + +from tests import unittest +from tests.unittest import override_config + + +class LoginTokenRequestServletTestCase(unittest.HomeserverTestCase): + + servlets = [ + login.register_servlets, + admin.register_servlets, + login_token_request.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.hs = self.setup_test_homeserver() + self.hs.config.registration.enable_registration = True + self.hs.config.registration.registrations_require_3pid = [] + self.hs.config.registration.auto_join_rooms = [] + self.hs.config.captcha.enable_registration_captcha = False + + return self.hs + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.user = "user123" + self.password = "password" + + def test_disabled(self) -> None: + channel = self.make_request("POST", "/login/token", {}, access_token=None) + self.assertEqual(channel.code, 400) + + self.register_user(self.user, self.password) + token = self.login(self.user, self.password) + + channel = self.make_request("POST", "/login/token", {}, access_token=token) + self.assertEqual(channel.code, 400) + + @override_config({"experimental_features": {"msc3882_enabled": True}}) + def test_require_auth(self) -> None: + channel = self.make_request("POST", "/login/token", {}, access_token=None) + self.assertEqual(channel.code, 401) + + @override_config({"experimental_features": {"msc3882_enabled": True}}) + def test_uia_on(self) -> None: + user_id = self.register_user(self.user, self.password) + token = self.login(self.user, self.password) + + channel = self.make_request("POST", "/login/token", {}, access_token=token) + self.assertEqual(channel.code, 401) + self.assertIn({"stages": ["m.login.password"]}, channel.json_body["flows"]) + + session = channel.json_body["session"] + + uia = { + "auth": { + "type": "m.login.password", + "identifier": {"type": "m.id.user", "user": self.user}, + "password": self.password, + "session": session, + }, + } + + channel = self.make_request("POST", "/login/token", uia, access_token=token) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["expires_in"], 300) + + login_token = channel.json_body["login_token"] + + channel = self.make_request( + "POST", + "/login", + content={"type": "m.login.token", "token": login_token}, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body["user_id"], user_id) + + @override_config( + {"experimental_features": {"msc3882_enabled": True, "msc3882_ui_auth": False}} + ) + def test_uia_off(self) -> None: + user_id = self.register_user(self.user, self.password) + token = self.login(self.user, self.password) + + channel = self.make_request("POST", "/login/token", {}, access_token=token) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["expires_in"], 300) + + login_token = channel.json_body["login_token"] + + channel = self.make_request( + "POST", + "/login", + content={"type": "m.login.token", "token": login_token}, + ) + self.assertEqual(channel.code, 200, channel.result) + self.assertEqual(channel.json_body["user_id"], user_id) + + @override_config( + { + "experimental_features": { + "msc3882_enabled": True, + "msc3882_ui_auth": False, + "msc3882_token_timeout": "15s", + } + } + ) + def 
test_expires_in(self) -> None: + self.register_user(self.user, self.password) + token = self.login(self.user, self.password) + + channel = self.make_request("POST", "/login/token", {}, access_token=token) + self.assertEqual(channel.code, 200) + self.assertEqual(channel.json_body["expires_in"], 15) diff --git a/tests/rest/client/test_relations.py b/tests/rest/client/test_relations.py index 651f4f415d1d..d33e34d82957 100644 --- a/tests/rest/client/test_relations.py +++ b/tests/rest/client/test_relations.py @@ -788,6 +788,7 @@ def test_basic_paginate_relations(self) -> None: channel.json_body["chunk"][0], ) + @unittest.override_config({"experimental_features": {"msc3715_enabled": True}}) def test_repeated_paginate_relations(self) -> None: """Test that if we paginate using a limit and tokens then we get the expected events. @@ -809,7 +810,7 @@ def test_repeated_paginate_relations(self) -> None: channel = self.make_request( "GET", - f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", + f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?limit=3{from_token}", access_token=self.user_token, ) self.assertEqual(200, channel.code, channel.json_body) @@ -827,6 +828,32 @@ def test_repeated_paginate_relations(self) -> None: found_event_ids.reverse() self.assertEqual(found_event_ids, expected_event_ids) + # Test forward pagination. + prev_token = "" + found_event_ids = [] + for _ in range(20): + from_token = "" + if prev_token: + from_token = "&from=" + prev_token + + channel = self.make_request( + "GET", + f"/_matrix/client/v1/rooms/{self.room}/relations/{self.parent_id}?org.matrix.msc3715.dir=f&limit=3{from_token}", + access_token=self.user_token, + ) + self.assertEqual(200, channel.code, channel.json_body) + + found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) + next_batch = channel.json_body.get("next_batch") + + self.assertNotEqual(prev_token, next_batch) + prev_token = next_batch + + if not prev_token: + break + + self.assertEqual(found_event_ids, expected_event_ids) + def test_pagination_from_sync_and_messages(self) -> None: """Pagination tokens from /sync and /messages can be used to paginate /relations.""" channel = self._send_relation(RelationTypes.ANNOTATION, "m.reaction", "A") diff --git a/tests/unittest.py b/tests/unittest.py index 975b0a23a7b7..00cb023198b5 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -300,47 +300,31 @@ def setUp(self) -> None: if hasattr(self, "user_id"): if self.hijack_auth: assert self.helper.auth_user_id is not None + token = "some_fake_token" # We need a valid token ID to satisfy foreign key constraints. 
token_id = self.get_success( self.hs.get_datastores().main.add_access_token_to_user( self.helper.auth_user_id, - "some_fake_token", + token, None, None, ) ) - async def get_user_by_access_token( - token: Optional[str] = None, allow_guest: bool = False - ) -> JsonDict: - assert self.helper.auth_user_id is not None - return { - "user": UserID.from_string(self.helper.auth_user_id), - "token_id": token_id, - "is_guest": False, - } - - async def get_user_by_req( - request: SynapseRequest, - allow_guest: bool = False, - allow_expired: bool = False, - ) -> Requester: + # This has to be a function and not just a Mock, because + # `self.helper.auth_user_id` is temporarily reassigned in some tests + async def get_requester(*args, **kwargs) -> Requester: assert self.helper.auth_user_id is not None return create_requester( - UserID.from_string(self.helper.auth_user_id), - token_id, - False, - False, - None, + user_id=UserID.from_string(self.helper.auth_user_id), + access_token_id=token_id, ) # Type ignore: mypy doesn't like us assigning to methods. - self.hs.get_auth().get_user_by_req = get_user_by_req # type: ignore[assignment] - self.hs.get_auth().get_user_by_access_token = get_user_by_access_token # type: ignore[assignment] - self.hs.get_auth().get_access_token_from_request = Mock( # type: ignore[assignment] - return_value="1234" - ) + self.hs.get_auth().get_user_by_req = get_requester # type: ignore[assignment] + self.hs.get_auth().get_user_by_access_token = get_requester # type: ignore[assignment] + self.hs.get_auth().get_access_token_from_request = Mock(return_value=token) # type: ignore[assignment] if self.needs_threadpool: self.reactor.threadpool = ThreadPool() # type: ignore[assignment]
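
For reference, the auth-hijacking pattern introduced in the tests/unittest.py hunk above (one coroutine serving both `get_user_by_req` and `get_user_by_access_token`, plus a `Mock` for `get_access_token_from_request`) can be sketched in isolation roughly as follows. This is an illustrative, self-contained approximation only: the `FakeAuth`, `Requester` dataclass, and `hijack_auth` names below are invented for the example and are not part of Synapse, which instead assigns the coroutine to the methods of the object returned by `self.hs.get_auth()`.

# Illustrative sketch only; not part of the patch above.
import asyncio
from dataclasses import dataclass
from typing import Any, Optional
from unittest.mock import Mock


@dataclass
class Requester:
    # Minimal stand-in for Synapse's Requester for the purposes of this sketch.
    user_id: str
    access_token_id: Optional[int]


class FakeAuth:
    """Stands in for the object returned by hs.get_auth() in real tests."""

    async def get_user_by_req(self, request: Any, allow_guest: bool = False) -> Requester:
        raise NotImplementedError

    async def get_user_by_access_token(self, token: Optional[str] = None) -> Requester:
        raise NotImplementedError

    def get_access_token_from_request(self, request: Any) -> str:
        raise NotImplementedError


def hijack_auth(auth: FakeAuth, user_id: str, token_id: int, token: str) -> None:
    # A single coroutine serves both lookup paths, so the caller's identity is
    # consistent no matter which code path the code under test takes.
    async def get_requester(*args: Any, **kwargs: Any) -> Requester:
        return Requester(user_id=user_id, access_token_id=token_id)

    auth.get_user_by_req = get_requester  # type: ignore[assignment]
    auth.get_user_by_access_token = get_requester  # type: ignore[assignment]
    auth.get_access_token_from_request = Mock(return_value=token)  # type: ignore[assignment]


if __name__ == "__main__":
    auth = FakeAuth()
    hijack_auth(auth, "@alice:test", token_id=1, token="some_fake_token")
    # Both stubbed entry points now resolve to the same requester.
    print(asyncio.run(auth.get_user_by_req(request=None)))
    print(auth.get_access_token_from_request(None))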