matrix-synapse / 41db026
Update upstream source from tag 'upstream/1.73.0'. Update to upstream version '1.73.0' with Debian dir 8c122f4c1f40f1d1c4989902b42fd210edc223d9. Andrej Shadura.
164 changed file(s) with 3194 addition(s) and 1706 deletion(s).
33 root = true
44
55 # 4 space indentation
6 [*.py]
6 [*.{py,pyi}]
77 indent_style = space
88 indent_size = 4
99 max_line_length = 88
7373 - Debian packages from packages.matrix.org
7474 - pip (from PyPI)
7575 - Other (please mention below)
76 - I don't know
77 validations:
78 required: true
79 - type: input
80 id: database
81 attributes:
82 label: Database
83 description: |
84 Are you using SQLite or PostgreSQL? What's the version of your database?
85
86 If PostgreSQL, please also answer the following:
87 - are you using a single PostgreSQL server
88 or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)?
89 - have you previously ported from SQLite using the Synapse "portdb" script?
90 - have you previously restored from a backup?
91 validations:
92 required: true
93 - type: dropdown
94 id: workers
95 attributes:
96 label: Workers
97 description: |
98 Are you running a single Synapse process, or are you running
99 [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)?
100 options:
101 - Single process
102 - Multiple workers
103 - I don't know
104 validations:
105 required: true
76106 - type: textarea
77107 id: platform
78108 attributes:
83113 validations:
84114 required: true
85115 - type: textarea
116 id: config
117 attributes:
118 label: Configuration
119 description: |
120 Do you have any unusual config options turned on? If so, please provide details.
121
122 - Experimental or undocumented features
123 - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence)
124 - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html)
125 - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html)
126 - type: textarea
86127 id: logs
87128 attributes:
88129 label: Relevant log output
89130 description: |
90131 Please copy and paste any relevant log output, ideally at INFO or DEBUG log level.
91 This will be automatically formatted into code, so there is no need for backticks.
132 This will be automatically formatted into code, so there is no need for backticks (`\``).
92133
93134 Please be careful to remove any personal or private data.
94135
95 **Bug reports are usually very difficult to diagnose without logging.**
136 **Bug reports are usually impossible to diagnose without logging.**
96137 render: shell
97138 validations:
98139 required: true
2626 steps:
2727 - uses: actions/checkout@v3
2828 - name: Install Rust
29 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
29 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
3030 with:
3131 toolchain: stable
3232 - uses: Swatinem/rust-cache@v2
6060 - uses: actions/checkout@v3
6161
6262 - name: Install Rust
63 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
63 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
6464 with:
6565 toolchain: stable
6666 - uses: Swatinem/rust-cache@v2
133133 - uses: actions/checkout@v3
134134
135135 - name: Install Rust
136 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
136 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
137137 with:
138138 toolchain: stable
139139 - uses: Swatinem/rust-cache@v2
0 # This task does not run complement tests, see tests.yaml instead.
1 # This task does not build docker images for synapse for use on docker hub, see docker.yaml instead
2
3 name: Store complement-synapse image in ghcr.io
4 on:
5 push:
6 branches: [ "master" ]
7 schedule:
8 - cron: '0 5 * * *'
9 workflow_dispatch:
10 inputs:
11 branch:
12 required: true
13 default: 'develop'
14 type: choice
15 options:
16 - develop
17 - master
18
19 # Only run this action once per pull request/branch; restart if a new commit arrives.
20 # C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency
21 # and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context
22 concurrency:
23 group: ${{ github.workflow }}-${{ github.ref }}
24 cancel-in-progress: true
25
26 jobs:
27 build:
28 name: Build and push complement image
29 runs-on: ubuntu-latest
30 permissions:
31 contents: read
32 packages: write
33 steps:
34 - name: Checkout specific branch (debug build)
35 uses: actions/checkout@v3
36 if: github.event_name == 'workflow_dispatch'
37 with:
38 ref: ${{ inputs.branch }}
39 - name: Checkout clean copy of develop (scheduled build)
40 uses: actions/checkout@v3
41 if: github.event_name == 'schedule'
42 with:
43 ref: develop
44 - name: Checkout clean copy of master (on-push)
45 uses: actions/checkout@v3
46 if: github.event_name == 'push'
47 with:
48 ref: master
49 - name: Login to registry
50 uses: docker/login-action@v1
51 with:
52 registry: ghcr.io
53 username: ${{ github.actor }}
54 password: ${{ secrets.GITHUB_TOKEN }}
55 - name: Work out labels for complement image
56 id: meta
57 uses: docker/metadata-action@v4
58 with:
59 images: ghcr.io/${{ github.repository }}/complement-synapse
60 tags: |
61 type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}}
62 type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }}
63 type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }}
64 type=sha,format=long
65 - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image.
66 run: scripts-dev/complement.sh --build-only
67 - name: Tag and push generated image
68 run: |
69 for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do
70 echo "tag and push $TAG"
71 docker tag complement-synapse $TAG
72 docker push $TAG
73 done
2626 rust:
2727 - 'rust/**'
2828 - 'Cargo.toml'
29 - 'Cargo.lock'
2930
3031 check-sampleconfig:
3132 runs-on: ubuntu-latest
101102 # There don't seem to be versioned releases of this action per se: for each rust
102103 # version there is a branch which gets constantly rebased on top of master.
103104 # We pin to a specific commit for paranoia's sake.
104 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
105 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
105106 with:
106107 toolchain: 1.58.1
107108 components: clippy
121122 # There don't seem to be versioned releases of this action per se: for each rust
122123 # version there is a branch which gets constantly rebased on top of master.
123124 # We pin to a specific commit for paranoia's sake.
124 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
125 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
125126 with:
126127 toolchain: 1.58.1
127128 components: rustfmt
183184 # There don't seem to be versioned releases of this action per se: for each rust
184185 # version there is a branch which gets constantly rebased on top of master.
185186 # We pin to a specific commit for paranoia's sake.
186 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
187 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
187188 with:
188189 toolchain: 1.58.1
189190 - uses: Swatinem/rust-cache@v2
227228 # There don't seem to be versioned releases of this action per se: for each rust
228229 # version there is a branch which gets constantly rebased on top of master.
229230 # We pin to a specific commit for paranoia's sake.
230 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
231 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
231232 with:
232233 toolchain: 1.58.1
233234 - uses: Swatinem/rust-cache@v2
345346 # There don't seem to be versioned releases of this action per se: for each rust
346347 # version there is a branch which gets constantly rebased on top of master.
347348 # We pin to a specific commit for paranoia's sake.
348 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
349 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
349350 with:
350351 toolchain: 1.58.1
351352 - uses: Swatinem/rust-cache@v2
488489 # There don't seem to be versioned releases of this action per se: for each rust
489490 # version there is a branch which gets constantly rebased on top of master.
490491 # We pin to a specific commit for paranoia's sake.
491 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
492 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
492493 with:
493494 toolchain: 1.58.1
494495 - uses: Swatinem/rust-cache@v2
516517 # There don't seem to be versioned releases of this action per se: for each rust
517518 # version there is a branch which gets constantly rebased on top of master.
518519 # We pin to a specific commit for paranoia's sake.
519 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
520 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
520521 with:
521522 toolchain: 1.58.1
522523 - uses: Swatinem/rust-cache@v2
1717 - uses: actions/checkout@v3
1818
1919 - name: Install Rust
20 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
20 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
2121 with:
2222 toolchain: stable
2323 - uses: Swatinem/rust-cache@v2
4242 - run: sudo apt-get -qq install xmlsec1
4343
4444 - name: Install Rust
45 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
45 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
4646 with:
4747 toolchain: stable
4848 - uses: Swatinem/rust-cache@v2
8181 - uses: actions/checkout@v3
8282
8383 - name: Install Rust
84 uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb
84 uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f
8585 with:
8686 toolchain: stable
8787 - uses: Swatinem/rust-cache@v2
0 Synapse 1.73.0 (2022-12-06)
1 ===========================
2
3 Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details.
4
5 No significant changes since 1.73.0rc2.
6
7
8 Synapse 1.73.0rc2 (2022-12-01)
9 ==============================
10
11 Bugfixes
12 --------
13
14 - Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582))
15
16
17 Synapse 1.73.0rc1 (2022-11-29)
18 ==============================
19
20 Features
21 --------
22
23 - Speed up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527))
24 - Improve DB performance by reducing the amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534))
25 - Add support for handling avatars in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917))
26 - Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471)) An example request against the stable client endpoint is sketched after this list.
27 - Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510))
28 - Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524))
29 - Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580))
30
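As a rough illustration of the stable client endpoint mentioned above, a request could look like the sketch below. The homeserver URL, room ID and access token are placeholders; the response is expected to include the matched `event_id` and `origin_server_ts`.

```python
# Sketch: ask the stable jump-to-date endpoint for the event closest to a timestamp.
# Homeserver URL, room ID and access token are placeholders.
from urllib.parse import quote

import requests

BASE_URL = "https://matrix.example.com"
ROOM_ID = "!room:example.com"
ACCESS_TOKEN = "<access token>"

resp = requests.get(
    f"{BASE_URL}/_matrix/client/v1/rooms/{quote(ROOM_ID)}/timestamp_to_event",
    params={"ts": "1669852800000", "dir": "f"},  # ms since the epoch, search forwards
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())
```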
31
32 Bugfixes
33 --------
34
35 - Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149))
36 - Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393))
37 - Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466))
38 - Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537))
39 - Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574))
40 - In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565))
41 - Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490))
42
43
44 Improved Documentation
45 ----------------------
46
47 - Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499))
48
49
50 Deprecations and Removals
51 -------------------------
52
53 - Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538))
54
55
56 Internal Changes
57 ----------------
58
59 - Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)).
60 - Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468))
61 - Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476))
62 - Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496), [\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573))
64 - Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403))
65 - Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404))
66 - Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408))
67 - Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515))
68 - Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449))
69 - Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469))
70 - `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479))
71 - Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487))
72 - Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516))
73 - Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522))
74 - Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526))
75 - Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571))
76 - Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575))
77 - Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)).
78
79
080 Synapse 1.72.0 (2022-11-22)
181 ===========================
282
322322
323323 [[package]]
324324 name = "serde"
325 version = "1.0.147"
326 source = "registry+https://github.com/rust-lang/crates.io-index"
327 checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965"
325 version = "1.0.148"
326 source = "registry+https://github.com/rust-lang/crates.io-index"
327 checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc"
328328 dependencies = [
329329 "serde_derive",
330330 ]
331331
332332 [[package]]
333333 name = "serde_derive"
334 version = "1.0.147"
335 source = "registry+https://github.com/rust-lang/crates.io-index"
336 checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852"
334 version = "1.0.148"
335 source = "registry+https://github.com/rust-lang/crates.io-index"
336 checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c"
337337 dependencies = [
338338 "proc-macro2",
339339 "quote",
342342
343343 [[package]]
344344 name = "serde_json"
345 version = "1.0.87"
346 source = "registry+https://github.com/rust-lang/crates.io-index"
347 checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45"
345 version = "1.0.89"
346 source = "registry+https://github.com/rust-lang/crates.io-index"
347 checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db"
348348 dependencies = [
349349 "itoa",
350350 "ryu",
365365
366366 [[package]]
367367 name = "syn"
368 version = "1.0.102"
369 source = "registry+https://github.com/rust-lang/crates.io-index"
370 checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1"
368 version = "1.0.104"
369 source = "registry+https://github.com/rust-lang/crates.io-index"
370 checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce"
371371 dependencies = [
372372 "proc-macro2",
373373 "quote",
9999 # client-side support for partial state in /send_join responses
100100 faster_joins: true
101101 {% endif %}
102 # Enable jump to date endpoint
103 msc3030_enabled: true
104102 # Filtering /messages by relation type.
105103 msc3874_enabled: true
106104
139139 "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event",
140140 "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms",
141141 "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases",
142 "^/_matrix/client/v1/rooms/.*/timestamp_to_event$",
142143 "^/_matrix/client/(api/v1|r0|v3|unstable)/search",
143144 ],
144145 "shared_extra_conf": {},
162163 "^/_matrix/federation/(v1|v2)/invite/",
163164 "^/_matrix/federation/(v1|v2)/query_auth/",
164165 "^/_matrix/federation/(v1|v2)/event_auth/",
166 "^/_matrix/federation/v1/timestamp_to_event/",
165167 "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/",
166168 "^/_matrix/federation/(v1|v2)/user/devices/",
167169 "^/_matrix/federation/(v1|v2)/get_groups_publicised$",
212214 "listener_resources": ["client", "replication"],
213215 "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"],
214216 "shared_extra_conf": {},
215 "worker_extra_conf": (
216 "worker_main_http_uri: http://127.0.0.1:%d"
217 % (MAIN_PROCESS_HTTP_LISTENER_PORT,)
218 ),
217 "worker_extra_conf": "",
219218 },
220219 "account_data": {
221220 "app": "synapse.app.generic_worker",
8686 wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb
8787 dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb
8888 ```
89
90 # Upgrading to v1.73.0
91
92 ## Legacy Prometheus metric names have now been removed
93
94 Synapse v1.69.0 included the deprecation of legacy Prometheus metric names
95 and offered an option to disable them.
96 Synapse v1.71.0 disabled legacy Prometheus metric names by default.
97
98 This version, v1.73.0, removes those legacy Prometheus metric names entirely.
99 This also means that the `enable_legacy_metrics` configuration option has been
100 removed; it will no longer be possible to re-enable the legacy metric names.
101
102 If you use metrics and have not yet updated your Grafana dashboard(s),
103 Prometheus console(s) or alerting rule(s), please consider doing so when upgrading
104 to this version.
105 Note that the included Grafana dashboard was updated in v1.72.0 to correct some
106 metric names which were missed when legacy metrics were disabled by default.
107
108 See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names)
109 for more context.
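If it is unclear whether anything still depends on the legacy names, one rough way to check before upgrading is to scan the metrics output for colon-style names. This is a minimal sketch, assuming a metrics listener is reachable at `http://localhost:9000/_synapse/metrics` (adjust for your deployment):

```python
# Sketch: list metric names that still use the legacy colon-style naming.
# METRICS_URL is an assumption about the local setup; point it at your metrics listener.
import re

import requests

METRICS_URL = "http://localhost:9000/_synapse/metrics"

resp = requests.get(METRICS_URL, timeout=10)
resp.raise_for_status()

legacy_names = set()
for line in resp.text.splitlines():
    if not line or line.startswith("#"):
        continue  # skip HELP/TYPE comments and blank lines
    name = re.split(r"[ {]", line, maxsplit=1)[0]
    if ":" in name:
        legacy_names.add(name)  # colons mark the legacy, pre-OpenMetrics names

for name in sorted(legacy_names):
    print(name)
```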
110
89111
90112 # Upgrading to v1.72.0
91113
1818 Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings.
1919
2020 ## Making an Admin API request
21 For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints)
21 For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints)
2222 that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a
2323 reverse proxy. This means you should typically query the Admin API from a terminal on
2424 the machine which runs Synapse.
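For illustration, a minimal Admin API request made from the Synapse host might look like the sketch below; the base URL, user ID and token are placeholders, and the `v2` user query endpoint is used only as an example:

```python
# Sketch: query a user's account details via the Admin API from the Synapse host.
# BASE_URL, the user ID and ACCESS_TOKEN are placeholders for your deployment.
import requests

BASE_URL = "http://localhost:8008"
ACCESS_TOKEN = "<admin user's access token>"

resp = requests.get(
    f"{BASE_URL}/_synapse/admin/v2/users/@alice:example.com",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())
```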
24352435 ```yaml
24362436 enable_metrics: true
24372437 ```
2438 ---
2439 ### `enable_legacy_metrics`
2440
2441 Set to `true` to publish both legacy and non-legacy Prometheus metric names,
2442 or to `false` to only publish non-legacy Prometheus metric names.
2443 Defaults to `false`. Has no effect if `enable_metrics` is `false`.
2444 **In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.**
2445
2446 Legacy metric names include:
2447 - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules;
2448 - counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard.
2449
2450 These legacy metric names are unconventional and not compliant with OpenMetrics standards.
2451 They are included for backwards compatibility.
2452
2453 Example configuration:
2454 ```yaml
2455 enable_legacy_metrics: false
2456 ```
2457
2458 See https://github.com/matrix-org/synapse/issues/11106 for context.
2459
2460 *Since v1.67.0.*
2461
2462 **Will be removed in v1.73.0.**
24632438 ---
24642439 ### `sentry`
24652440
29922967
29932968 For the default provider, the following settings are available:
29942969
2995 * subject_claim: name of the claim containing a unique identifier
2970 * `subject_claim`: name of the claim containing a unique identifier
29962971 for the user. Defaults to 'sub', which OpenID Connect
29972972 compliant providers should provide.
2973
2974 * `picture_claim`: name of the claim containing a URL for the user's profile picture.
2975 Defaults to 'picture', which OpenID Connect compliant providers should provide
2976 and which has to refer to a direct image file such as a PNG, JPEG, or GIF.
2977
2978 Currently only supported in monolithic (single-process) server configurations
2979 where the media repository runs within the Synapse process.
29982980
29992981 * `localpart_template`: Jinja2 template for the localpart of the MXID.
30002982 If this is not set, the user will be prompted to choose their
134134 [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)).
135135 * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option
136136 with an `http` listener.
137 * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
138 the main process (`worker_main_http_uri`).
137 * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for
138 the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer.
139139
140140 For example:
141141
190190 ^/_matrix/federation/(v1|v2)/send_leave/
191191 ^/_matrix/federation/(v1|v2)/invite/
192192 ^/_matrix/federation/v1/event_auth/
193 ^/_matrix/federation/v1/timestamp_to_event/
193194 ^/_matrix/federation/v1/exchange_third_party_invite/
194195 ^/_matrix/federation/v1/user/devices/
195196 ^/_matrix/key/v2/query
217218 ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
218219 ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
219220 ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
221 ^/_matrix/client/v1/rooms/.*/timestamp_to_event$
220222 ^/_matrix/client/(api/v1|r0|v3|unstable)/search$
221223
222224 # Encryption requests
223 # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
224225 ^/_matrix/client/(r0|v3|unstable)/keys/query$
225226 ^/_matrix/client/(r0|v3|unstable)/keys/changes$
226227 ^/_matrix/client/(r0|v3|unstable)/keys/claim$
375376 - persisting them to the DB, and finally
376377 - updating the events stream.
377378
378 Because load is sharded in this way, you *must* restart all worker instances when
379 Because load is sharded in this way, you *must* restart all worker instances when
379380 adding or removing event persisters.
380381
381382 An `event_persister` should not be mistaken for an `event_creator`.
1010 local_partial_types = True
1111 no_implicit_optional = True
1212 disallow_untyped_defs = True
13 strict_equality = True
1314
1415 files =
1516 docker/,
5758 |tests/server_notices/test_resource_limits_server_notices.py
5859 |tests/test_state.py
5960 |tests/test_terms_auth.py
60 |tests/util/caches/test_cached_call.py
61 |tests/util/caches/test_deferred_cache.py
62 |tests/util/caches/test_descriptors.py
63 |tests/util/caches/test_response_cache.py
64 |tests/util/caches/test_ttlcache.py
6561 |tests/util/test_async_helpers.py
6662 |tests/util/test_batching_queue.py
6763 |tests/util/test_dict_cache.py
116112 [mypy-tests.state.test_profile]
117113 disallow_untyped_defs = True
118114
115 [mypy-tests.storage.test_id_generators]
116 disallow_untyped_defs = True
117
119118 [mypy-tests.storage.test_profile]
119 disallow_untyped_defs = True
120
121 [mypy-tests.handlers.test_sso]
120122 disallow_untyped_defs = True
121123
122124 [mypy-tests.storage.test_user_directory]
128130 [mypy-tests.federation.transport.test_client]
129131 disallow_untyped_defs = True
130132
133 [mypy-tests.util.caches.*]
134 disallow_untyped_defs = True
135
136 [mypy-tests.util.caches.test_descriptors]
137 disallow_untyped_defs = False
138
131139 [mypy-tests.utils]
132140 disallow_untyped_defs = True
133
134141
135142 ;; Dependencies without annotations
136143 ;; Before ignoring a module, check to see if type stubs are available.
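The newly enabled `strict_equality` option (added at the top of this file) makes mypy reject equality checks between types that can never be equal. A small illustration, not taken from the Synapse codebase:

```python
# With strict_equality enabled, mypy rejects comparisons between non-overlapping
# types instead of letting them silently evaluate to False at runtime.
def is_default_port(port: int) -> bool:
    # mypy error (strict_equality): non-overlapping equality check (int vs str)
    return port == "8448"  # bug: compares an int against a str
```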
662662
663663 [[package]]
664664 name = "phonenumbers"
665 version = "8.12.56"
665 version = "8.13.0"
666666 description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers."
667667 category = "main"
668668 optional = false
813813
814814 [[package]]
815815 name = "pygithub"
816 version = "1.56"
816 version = "1.57"
817817 description = "Use the full Github API v3"
818818 category = "dev"
819819 optional = false
820 python-versions = ">=3.6"
820 python-versions = ">=3.7"
821821
822822 [package.dependencies]
823823 deprecated = "*"
824 pyjwt = ">=2.0"
824 pyjwt = ">=2.4.0"
825825 pynacl = ">=1.4.0"
826826 requests = ">=2.14.0"
827827
10751075
10761076 [[package]]
10771077 name = "sentry-sdk"
1078 version = "1.10.1"
1078 version = "1.11.0"
10791079 description = "Python client for Sentry (https://sentry.io)"
10801080 category = "main"
10811081 optional = true
10971097 flask = ["blinker (>=1.1)", "flask (>=0.11)"]
10981098 httpx = ["httpx (>=0.16.0)"]
10991099 pure-eval = ["asttokens", "executing", "pure-eval"]
1100 pymongo = ["pymongo (>=3.1)"]
11001101 pyspark = ["pyspark (>=2.4.4)"]
11011102 quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
11021103 rq = ["rq (>=0.6)"]
12551256
12561257 [[package]]
12571258 name = "towncrier"
1258 version = "21.9.0"
1259 version = "22.8.0"
12591260 description = "Building newsfiles for your project."
12601261 category = "dev"
12611262 optional = false
1262 python-versions = "*"
1263 python-versions = ">=3.7"
12631264
12641265 [package.dependencies]
12651266 click = "*"
12671268 incremental = "*"
12681269 jinja2 = "*"
12691270 setuptools = "*"
1270 tomli = {version = "*", markers = "python_version >= \"3.6\""}
1271 tomli = "*"
12711272
12721273 [package.extras]
12731274 dev = ["packaging"]
14381439
14391440 [[package]]
14401441 name = "types-pillow"
1441 version = "9.2.2.1"
1442 version = "9.3.0.1"
14421443 description = "Typing stubs for Pillow"
14431444 category = "dev"
14441445 optional = false
22562257 {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
22572258 ]
22582259 phonenumbers = [
2259 {file = "phonenumbers-8.12.56-py2.py3-none-any.whl", hash = "sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3"},
2260 {file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"},
2260 {file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"},
2261 {file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"},
22612262 ]
22622263 pillow = [
22632264 {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"},
24182419 {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"},
24192420 ]
24202421 pygithub = [
2421 {file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"},
2422 {file = "PyGithub-1.56.tar.gz", hash = "sha256:80c6d85cf0f9418ffeb840fd105840af694c4f17e102970badbaf678251f2a01"},
2422 {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"},
2423 {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"},
24232424 ]
24242425 pygments = [
24252426 {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"},
25672568 {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"},
25682569 ]
25692570 sentry-sdk = [
2570 {file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"},
2571 {file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"},
2571 {file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"},
2572 {file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"},
25722573 ]
25732574 service-identity = [
25742575 {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"},
27192720 {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"},
27202721 ]
27212722 towncrier = [
2722 {file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"},
2723 {file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"},
2723 {file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"},
2724 {file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"},
27242725 ]
27252726 treq = [
27262727 {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"},
28072808 {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"},
28082809 ]
28092810 types-pillow = [
2810 {file = "types-Pillow-9.2.2.1.tar.gz", hash = "sha256:85c139e06e1c46ec5f9c634d5c54a156b0958d5d0e8be024ed353db0c804b426"},
2811 {file = "types_Pillow-9.2.2.1-py3-none-any.whl", hash = "sha256:3a6a871cade8428433a21ef459bb0a65532b87d05f9e836a0664431ce445bdcf"},
2811 {file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"},
2812 {file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"},
28122813 ]
28132814 types-psycopg2 = [
28142815 {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"},
5656
5757 [tool.poetry]
5858 name = "matrix-synapse"
59 version = "1.72.0"
59 version = "1.73.0"
6060 description = "Homeserver for the Matrix decentralised comms protocol"
6161 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
6262 license = "Apache-2.0"
274274 default_enabled: true,
275275 },
276276 PushRule {
277 rule_id: Cow::Borrowed(
278 "global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one",
279 ),
280 priority_class: 1,
281 conditions: Cow::Borrowed(&[
282 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
283 key: Cow::Borrowed("type"),
284 // MSC3933: Type changed from template rule - see MSC.
285 pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")),
286 pattern_type: None,
287 })),
288 Condition::Known(KnownCondition::RoomMemberCount {
289 is: Some(Cow::Borrowed("2")),
290 }),
291 // MSC3933: Add condition on top of template rule - see MSC.
292 Condition::Known(KnownCondition::RoomVersionSupports {
293 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
294 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
295 }),
296 ]),
297 actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
298 default: true,
299 default_enabled: true,
300 },
301 PushRule {
302 rule_id: Cow::Borrowed(
303 "global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one",
304 ),
305 priority_class: 1,
306 conditions: Cow::Borrowed(&[
307 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
308 key: Cow::Borrowed("type"),
309 // MSC3933: Type changed from template rule - see MSC.
310 pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")),
311 pattern_type: None,
312 })),
313 Condition::Known(KnownCondition::RoomMemberCount {
314 is: Some(Cow::Borrowed("2")),
315 }),
316 // MSC3933: Add condition on top of template rule - see MSC.
317 Condition::Known(KnownCondition::RoomVersionSupports {
318 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
319 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
320 }),
321 ]),
322 actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
323 default: true,
324 default_enabled: true,
325 },
326 PushRule {
327 rule_id: Cow::Borrowed(
328 "global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one",
329 ),
330 priority_class: 1,
331 conditions: Cow::Borrowed(&[
332 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
333 key: Cow::Borrowed("type"),
334 // MSC3933: Type changed from template rule - see MSC.
335 pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")),
336 pattern_type: None,
337 })),
338 Condition::Known(KnownCondition::RoomMemberCount {
339 is: Some(Cow::Borrowed("2")),
340 }),
341 // MSC3933: Add condition on top of template rule - see MSC.
342 Condition::Known(KnownCondition::RoomVersionSupports {
343 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
344 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
345 }),
346 ]),
347 actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
348 default: true,
349 default_enabled: true,
350 },
351 PushRule {
352 rule_id: Cow::Borrowed(
353 "global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one",
354 ),
355 priority_class: 1,
356 conditions: Cow::Borrowed(&[
357 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
358 key: Cow::Borrowed("type"),
359 // MSC3933: Type changed from template rule - see MSC.
360 pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")),
361 pattern_type: None,
362 })),
363 Condition::Known(KnownCondition::RoomMemberCount {
364 is: Some(Cow::Borrowed("2")),
365 }),
366 // MSC3933: Add condition on top of template rule - see MSC.
367 Condition::Known(KnownCondition::RoomVersionSupports {
368 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
369 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
370 }),
371 ]),
372 actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
373 default: true,
374 default_enabled: true,
375 },
376 PushRule {
377 rule_id: Cow::Borrowed(
378 "global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one",
379 ),
380 priority_class: 1,
381 conditions: Cow::Borrowed(&[
382 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
383 key: Cow::Borrowed("type"),
384 // MSC3933: Type changed from template rule - see MSC.
385 pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")),
386 pattern_type: None,
387 })),
388 Condition::Known(KnownCondition::RoomMemberCount {
389 is: Some(Cow::Borrowed("2")),
390 }),
391 // MSC3933: Add condition on top of template rule - see MSC.
392 Condition::Known(KnownCondition::RoomVersionSupports {
393 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
394 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
395 }),
396 ]),
397 actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
398 default: true,
399 default_enabled: true,
400 },
401 PushRule {
402 rule_id: Cow::Borrowed(
403 "global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one",
404 ),
405 priority_class: 1,
406 conditions: Cow::Borrowed(&[
407 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
408 key: Cow::Borrowed("type"),
409 // MSC3933: Type changed from template rule - see MSC.
410 pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")),
411 pattern_type: None,
412 })),
413 Condition::Known(KnownCondition::RoomMemberCount {
414 is: Some(Cow::Borrowed("2")),
415 }),
416 // MSC3933: Add condition on top of template rule - see MSC.
417 Condition::Known(KnownCondition::RoomVersionSupports {
418 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
419 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
420 }),
421 ]),
422 actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]),
423 default: true,
424 default_enabled: true,
425 },
426 PushRule {
277427 rule_id: Cow::Borrowed("global/underride/.m.rule.message"),
278428 priority_class: 1,
279429 conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch(
297447 pattern_type: None,
298448 },
299449 ))]),
450 actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
451 default: true,
452 default_enabled: true,
453 },
454 PushRule {
455 rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"),
456 priority_class: 1,
457 conditions: Cow::Borrowed(&[
458 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
459 key: Cow::Borrowed("type"),
460 // MSC3933: Type changed from template rule - see MSC.
461 pattern: Some(Cow::Borrowed("m.encrypted")),
462 pattern_type: None,
463 })),
464 // MSC3933: Add condition on top of template rule - see MSC.
465 Condition::Known(KnownCondition::RoomVersionSupports {
466 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
467 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
468 }),
469 ]),
470 actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
471 default: true,
472 default_enabled: true,
473 },
474 PushRule {
475 rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"),
476 priority_class: 1,
477 conditions: Cow::Borrowed(&[
478 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
479 key: Cow::Borrowed("type"),
480 // MSC3933: Type changed from template rule - see MSC.
481 pattern: Some(Cow::Borrowed("m.message")),
482 pattern_type: None,
483 })),
484 // MSC3933: Add condition on top of template rule - see MSC.
485 Condition::Known(KnownCondition::RoomVersionSupports {
486 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
487 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
488 }),
489 ]),
490 actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
491 default: true,
492 default_enabled: true,
493 },
494 PushRule {
495 rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"),
496 priority_class: 1,
497 conditions: Cow::Borrowed(&[
498 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
499 key: Cow::Borrowed("type"),
500 // MSC3933: Type changed from template rule - see MSC.
501 pattern: Some(Cow::Borrowed("m.file")),
502 pattern_type: None,
503 })),
504 // MSC3933: Add condition on top of template rule - see MSC.
505 Condition::Known(KnownCondition::RoomVersionSupports {
506 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
507 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
508 }),
509 ]),
510 actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
511 default: true,
512 default_enabled: true,
513 },
514 PushRule {
515 rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"),
516 priority_class: 1,
517 conditions: Cow::Borrowed(&[
518 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
519 key: Cow::Borrowed("type"),
520 // MSC3933: Type changed from template rule - see MSC.
521 pattern: Some(Cow::Borrowed("m.image")),
522 pattern_type: None,
523 })),
524 // MSC3933: Add condition on top of template rule - see MSC.
525 Condition::Known(KnownCondition::RoomVersionSupports {
526 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
527 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
528 }),
529 ]),
530 actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
531 default: true,
532 default_enabled: true,
533 },
534 PushRule {
535 rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"),
536 priority_class: 1,
537 conditions: Cow::Borrowed(&[
538 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
539 key: Cow::Borrowed("type"),
540 // MSC3933: Type changed from template rule - see MSC.
541 pattern: Some(Cow::Borrowed("m.video")),
542 pattern_type: None,
543 })),
544 // MSC3933: Add condition on top of template rule - see MSC.
545 Condition::Known(KnownCondition::RoomVersionSupports {
546 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
547 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
548 }),
549 ]),
550 actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
551 default: true,
552 default_enabled: true,
553 },
554 PushRule {
555 rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"),
556 priority_class: 1,
557 conditions: Cow::Borrowed(&[
558 Condition::Known(KnownCondition::EventMatch(EventMatchCondition {
559 key: Cow::Borrowed("type"),
560 // MSC3933: Type changed from template rule - see MSC.
561 pattern: Some(Cow::Borrowed("m.audio")),
562 pattern_type: None,
563 })),
564 // MSC3933: Add condition on top of template rule - see MSC.
565 Condition::Known(KnownCondition::RoomVersionSupports {
566 // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally
567 feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"),
568 }),
569 ]),
300570 actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]),
301571 default: true,
302572 default_enabled: true,
1111 // See the License for the specific language governing permissions and
1212 // limitations under the License.
1313
14 use std::borrow::Cow;
1415 use std::collections::BTreeMap;
1516
17 use crate::push::{PushRule, PushRules};
1618 use anyhow::{Context, Error};
1719 use lazy_static::lazy_static;
1820 use log::warn;
2830 lazy_static! {
2931 /// Used to parse the `is` clause in the room member count condition.
3032 static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex");
33
34 /// Used to determine which MSC3931 room version feature flags are actually known to
35 /// the push evaluator.
36 static ref KNOWN_RVER_FLAGS: Vec<String> = vec![
37 RoomVersionFeatures::ExtensibleEvents.as_str().to_string(),
38 ];
39
40 /// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which
41 /// declare Extensible Events support ultimately *disable* push rules which do not declare
42 /// *any* MSC3931 room_version_supports condition).
43 static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![
44 "global/override/.m.rule.master".to_string(),
45 "global/override/.m.rule.roomnotif".to_string(),
46 "global/content/.m.rule.contains_user_name".to_string(),
47 ];
48 }
49
50 enum RoomVersionFeatures {
51 ExtensibleEvents,
52 }
53
54 impl RoomVersionFeatures {
55 fn as_str(&self) -> &'static str {
56 match self {
57 RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events",
58 }
59 }
3160 }
3261
3362 /// Allows running a set of push rules against a particular event.
5685
5786 /// If msc3664, push rules for related events, is enabled.
5887 related_event_match_enabled: bool,
88
89 /// If MSC3931 is applicable, the feature flags for the room version.
90 room_version_feature_flags: Vec<String>,
91
92 /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same
93 /// flag as MSC1767 (extensible events core).
94 msc3931_enabled: bool,
5995 }
6096
6197 #[pymethods]
69105 notification_power_levels: BTreeMap<String, i64>,
70106 related_events_flattened: BTreeMap<String, BTreeMap<String, String>>,
71107 related_event_match_enabled: bool,
108 room_version_feature_flags: Vec<String>,
109 msc3931_enabled: bool,
72110 ) -> Result<Self, Error> {
73111 let body = flattened_keys
74112 .get("content.body")
83121 sender_power_level,
84122 related_events_flattened,
85123 related_event_match_enabled,
124 room_version_feature_flags,
125 msc3931_enabled,
86126 })
87127 }
88128
105145 continue;
106146 }
107147
148 let rule_id = &push_rule.rule_id().to_string();
149 let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string();
150 let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag);
151 let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id);
152 let mut has_rver_condition = false;
153
108154 for condition in push_rule.conditions.iter() {
155 has_rver_condition = has_rver_condition
156 || match condition {
157 Condition::Known(known) => match known {
158 // per MSC3932, we just need *any* room version condition to match
159 KnownCondition::RoomVersionSupports { feature: _ } => true,
160 _ => false,
161 },
162 _ => false,
163 };
109164 match self.match_condition(condition, user_id, display_name) {
110165 Ok(true) => {}
111166 Ok(false) => continue 'outer,
114169 continue 'outer;
115170 }
116171 }
172 }
173
174 // MSC3932: Disable push rules in extensible event-supporting room versions if they
175 // don't describe *any* MSC3931 room version condition, unless the rule is on the
176 // safe list.
177 if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events {
178 continue;
117179 }
118180
119181 let actions = push_rule
201263 *sender_power_level >= required_level
202264 } else {
203265 false
266 }
267 }
268 KnownCondition::RoomVersionSupports { feature } => {
269 if !self.msc3931_enabled {
270 false
271 } else {
272 let flag = feature.to_string();
273 KNOWN_RVER_FLAGS.contains(&flag)
274 && self.room_version_feature_flags.contains(&flag)
204275 }
205276 }
206277 };
361432 BTreeMap::new(),
362433 BTreeMap::new(),
363434 true,
435 vec![],
436 true,
364437 )
365438 .unwrap();
366439
367440 let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob"));
368441 assert_eq!(result.len(), 3);
369442 }
443
444 #[test]
445 fn test_requires_room_version_supports_condition() {
446 let mut flattened_keys = BTreeMap::new();
447 flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string());
448 let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()];
449 let evaluator = PushRuleEvaluator::py_new(
450 flattened_keys,
451 10,
452 Some(0),
453 BTreeMap::new(),
454 BTreeMap::new(),
455 false,
456 flags,
457 true,
458 )
459 .unwrap();
460
461 // first test: are the master and contains_user_name rules excluded from the "requires room
462 // version condition" check?
463 let mut result = evaluator.run(
464 &FilteredPushRules::default(),
465 Some("@bob:example.org"),
466 None,
467 );
468 assert_eq!(result.len(), 3);
469
470 // second test: if an appropriate push rule is in play, does it get handled?
471 let custom_rule = PushRule {
472 rule_id: Cow::from("global/underride/.org.example.extensible"),
473 priority_class: 1, // underride
474 conditions: Cow::from(vec![Condition::Known(
475 KnownCondition::RoomVersionSupports {
476 feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()),
477 },
478 )]),
479 actions: Cow::from(vec![Action::Notify]),
480 default: false,
481 default_enabled: true,
482 };
483 let rules = PushRules::new(vec![custom_rule]);
484 result = evaluator.run(
485 &FilteredPushRules::py_new(rules, BTreeMap::new(), true, true),
486 None,
487 None,
488 );
489 assert_eq!(result.len(), 1);
490 }
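For reference, the new `org.matrix.msc3931.room_version_supports` condition, as it might appear in push-rule JSON handed to Synapse, is sketched below; the rule ID and actions are illustrative and mirror the test above:

```python
# Sketch: a push rule carrying the unstable MSC3931 condition, expressed as the
# JSON it would be serialised to. The rule ID and actions are illustrative.
import json

rule = {
    "rule_id": "global/underride/.org.example.extensible",
    "conditions": [
        {
            "kind": "org.matrix.msc3931.room_version_supports",
            "feature": "org.matrix.msc3932.extensible_events",
        }
    ],
    "actions": ["notify"],
    "default": False,
    "enabled": True,
}

print(json.dumps(rule, indent=2))
```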
276276 SenderNotificationPermission {
277277 key: Cow<'static, str>,
278278 },
279 #[serde(rename = "org.matrix.msc3931.room_version_supports")]
280 RoomVersionSupports {
281 feature: Cow<'static, str>,
282 },
279283 }
280284
281285 impl IntoPy<PyObject> for Condition {
407411 push_rules: PushRules,
408412 enabled_map: BTreeMap<String, bool>,
409413 msc3664_enabled: bool,
414 msc1767_enabled: bool,
410415 }
411416
412417 #[pymethods]
416421 push_rules: PushRules,
417422 enabled_map: BTreeMap<String, bool>,
418423 msc3664_enabled: bool,
424 msc1767_enabled: bool,
419425 ) -> Self {
420426 Self {
421427 push_rules,
422428 enabled_map,
423429 msc3664_enabled,
430 msc1767_enabled,
424431 }
425432 }
426433
445452 return false;
446453 }
447454
455 if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") {
456 return false;
457 }
458
448459 true
449460 })
450461 .map(|r| {
491502 }
492503
493504 #[test]
505 fn test_deserialize_unstable_msc3931_condition() {
506 let json =
507 r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#;
508
509 let condition: Condition = serde_json::from_str(json).unwrap();
510 assert!(matches!(
511 condition,
512 Condition::Known(KnownCondition::RoomVersionSupports { feature: _ })
513 ));
514 }
515
516 #[test]
494517 fn test_deserialize_custom_condition() {
495518 let json = r#"{"kind":"custom_tag"}"#;
496519
161161 # We only test faster room joins on monoliths, because they are purposefully
162162 # being developed without worker support to start with.
163163 #
164 # The tests for importing historical messages (MSC2716) and jump to date (MSC3030)
165 # also only pass with monoliths, currently.
166 test_tags="$test_tags,faster_joins,msc2716,msc3030"
164 # The tests for importing historical messages (MSC2716) also only pass with monoliths,
165 # currently.
166 test_tags="$test_tags,faster_joins,msc2716"
167167 fi
168168
169169
4545 import signedjson.types
4646 import srvlookup
4747 import yaml
48 from requests import PreparedRequest, Response
4849 from requests.adapters import HTTPAdapter
4950 from urllib3 import HTTPConnectionPool
5051
5152 # uncomment the following to enable debug logging of http requests
52 # from httplib import HTTPConnection
53 # from http.client import HTTPConnection
5354 # HTTPConnection.debuglevel = 1
5455
5556
102103 destination: str,
103104 path: str,
104105 content: Optional[str],
106 verify_tls: bool,
105107 ) -> requests.Response:
106108 if method is None:
107109 if content is None:
140142 s.mount("matrix://", MatrixConnectionAdapter())
141143
142144 headers: Dict[str, str] = {
143 "Host": destination,
144145 "Authorization": authorization_headers[0],
145146 }
146147
151152 method=method,
152153 url=dest,
153154 headers=headers,
154 verify=False,
155 verify=verify_tls,
155156 data=content,
156157 stream=True,
157158 )
200201 )
201202
202203 parser.add_argument("--body", help="Data to send as the body of the HTTP request")
204
205 parser.add_argument(
206 "--insecure",
207 action="store_true",
208 help="Disable TLS certificate verification",
209 )
203210
204211 parser.add_argument(
205212 "path", help="request path, including the '/_matrix/federation/...' prefix."
226233 args.destination,
227234 args.path,
228235 content=args.body,
236 verify_tls=not args.insecure,
229237 )
230238
231239 sys.stderr.write("Status Code: %d\n" % (result.status_code,))
253261
254262
255263 class MatrixConnectionAdapter(HTTPAdapter):
264 def send(
265 self,
266 request: PreparedRequest,
267 *args: Any,
268 **kwargs: Any,
269 ) -> Response:
270 # overrides the send() method in the base class.
271
272 # We need to look for .well-known redirects before passing the request up to
273 # HTTPAdapter.send().
274 assert isinstance(request.url, str)
275 parsed = urlparse.urlsplit(request.url)
276 server_name = parsed.netloc
277 well_known = self._get_well_known(parsed.netloc)
278
279 if well_known:
280 server_name = well_known
281
282 # replace the scheme in the uri with https, so that cert verification is done
283 # also replace the hostname if we got a .well-known result
284 request.url = urlparse.urlunsplit(
285 ("https", server_name, parsed.path, parsed.query, parsed.fragment)
286 )
287
288 # at this point we also add the host header (otherwise urllib will add one
289 # based on the `host` from the connection returned by `get_connection`,
290 # which will be wrong if there is an SRV record).
291 request.headers["Host"] = server_name
292
293 return super().send(request, *args, **kwargs)
294
295 def get_connection(
296 self, url: str, proxies: Optional[Dict[str, str]] = None
297 ) -> HTTPConnectionPool:
298 # overrides the get_connection() method in the base class
299 parsed = urlparse.urlsplit(url)
300 (host, port, ssl_server_name) = self._lookup(parsed.netloc)
301 print(
302 f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr
303 )
304 return self.poolmanager.connection_from_host(
305 host,
306 port=port,
307 scheme="https",
308 pool_kwargs={"server_hostname": ssl_server_name},
309 )
310
256311 @staticmethod
257 def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]:
258 if s[-1] == "]":
312 def _lookup(server_name: str) -> Tuple[str, int, str]:
313 """
314 Do an SRV lookup on a server name and return the host:port to connect to.
315 Given the server_name (after any .well-known lookup), return the host, port and
316 the SSL server name.
317 """
318 if server_name[-1] == "]":
259319 # ipv6 literal (with no port)
260 return s, 8448
261
262 if ":" in s:
263 out = s.rsplit(":", 1)
320 return server_name, 8448, server_name
321
322 if ":" in server_name:
323 # explicit port
324 out = server_name.rsplit(":", 1)
264325 try:
265326 port = int(out[1])
266327 except ValueError:
267 raise ValueError("Invalid host:port '%s'" % s)
268 return out[0], port
269
270 # try a .well-known lookup
271 if not skip_well_known:
272 well_known = MatrixConnectionAdapter.get_well_known(s)
273 if well_known:
274 return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True)
328 raise ValueError("Invalid host:port '%s'" % (server_name,))
329 return out[0], port, out[0]
275330
276331 try:
277 srv = srvlookup.lookup("matrix", "tcp", s)[0]
278 return srv.host, srv.port
332 srv = srvlookup.lookup("matrix", "tcp", server_name)[0]
333 print(
334 f"SRV lookup on _matrix._tcp.{server_name} gave {srv}",
335 file=sys.stderr,
336 )
337 return srv.host, srv.port, server_name
279338 except Exception:
280 return s, 8448
339 return server_name, 8448, server_name
281340
282341 @staticmethod
283 def get_well_known(server_name: str) -> Optional[str]:
284 uri = "https://%s/.well-known/matrix/server" % (server_name,)
285 print("fetching %s" % (uri,), file=sys.stderr)
342 def _get_well_known(server_name: str) -> Optional[str]:
343 if ":" in server_name:
344 # explicit port, or ipv6 literal. Either way, no .well-known
345 return None
346
347 # TODO: check for ipv4 literals
348
349 uri = f"https://{server_name}/.well-known/matrix/server"
350 print(f"fetching {uri}", file=sys.stderr)
286351
287352 try:
288353 resp = requests.get(uri)
303368 print("Invalid response from %s: %s" % (uri, e), file=sys.stderr)
304369 return None
305370
306 def get_connection(
307 self, url: str, proxies: Optional[Dict[str, str]] = None
308 ) -> HTTPConnectionPool:
309 parsed = urlparse.urlparse(url)
310
311 (host, port) = self.lookup(parsed.netloc)
312 netloc = "%s:%d" % (host, port)
313 print("Connecting to %s" % (netloc,), file=sys.stderr)
314 url = urlparse.urlunparse(
315 ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment)
316 )
317 return super().get_connection(url, proxies)
318
319371
320372 if __name__ == "__main__":
321373 main()
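Editor's sketch (hostname and port are examples): the delegation document that _get_well_known() fetches from https://&lt;server_name&gt;/.well-known/matrix/server is expected to carry an "m.server" entry naming the host (and optional port) to connect to instead, e.g.:

    well_known_body = {"m.server": "synapse.example.org:443"}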
2525
2626 class FilteredPushRules:
2727 def __init__(
28 self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool
28 self,
29 push_rules: PushRules,
30 enabled_map: Dict[str, bool],
31 msc3664_enabled: bool,
32 msc1767_enabled: bool,
2933 ): ...
3034 def rules(self) -> Collection[Tuple[PushRule, bool]]: ...
3135
4044 notification_power_levels: Mapping[str, int],
4145 related_events_flattened: Mapping[str, Mapping[str, str]],
4246 related_event_match_enabled: bool,
47 room_version_feature_flags: list[str],
48 msc3931_enabled: bool,
4349 ): ...
4450 def run(
4551 self,
712712 set to the reason code from the HTTP response.
713713
714714 Returns:
715 SynapseError:
715 The error converted to a SynapseError.
716716 """
717717 # try to parse the body as json, to get better errcode/msg, but
718718 # default to M_UNKNOWN with the HTTP status as the error text
1111 # See the License for the specific language governing permissions and
1212 # limitations under the License.
1313
14 from typing import Callable, Dict, Optional
14 from typing import Callable, Dict, List, Optional
1515
1616 import attr
1717
4848 class RoomDisposition:
4949 STABLE = "stable"
5050 UNSTABLE = "unstable"
51
52
53 class PushRuleRoomFlag:
54 """Enum for listing possible MSC3931 room version feature flags, for push rules"""
55
56 # MSC3932: Room version supports MSC1767 Extensible Events.
57 EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events"
5158
5259
5360 @attr.s(slots=True, frozen=True, auto_attribs=True)
9097 msc3787_knock_restricted_join_rule: bool
9198 # MSC3667: Enforce integer power levels
9299 msc3667_int_only_power_levels: bool
100 # MSC3931: Adds a push rule condition for "room version feature flags", making
101 # some push rules room version dependent. Note that adding a flag to this list
102 # is not enough to mark it "supported": the push rule evaluator also needs to
103 # support the flag. Unknown flags are ignored by the evaluator, making conditions
104 # fail if used.
105 msc3931_push_features: List[str] # values from PushRuleRoomFlag
93106
94107
95108 class RoomVersions:
110123 msc2716_redactions=False,
111124 msc3787_knock_restricted_join_rule=False,
112125 msc3667_int_only_power_levels=False,
126 msc3931_push_features=[],
113127 )
114128 V2 = RoomVersion(
115129 "2",
128142 msc2716_redactions=False,
129143 msc3787_knock_restricted_join_rule=False,
130144 msc3667_int_only_power_levels=False,
145 msc3931_push_features=[],
131146 )
132147 V3 = RoomVersion(
133148 "3",
146161 msc2716_redactions=False,
147162 msc3787_knock_restricted_join_rule=False,
148163 msc3667_int_only_power_levels=False,
164 msc3931_push_features=[],
149165 )
150166 V4 = RoomVersion(
151167 "4",
164180 msc2716_redactions=False,
165181 msc3787_knock_restricted_join_rule=False,
166182 msc3667_int_only_power_levels=False,
183 msc3931_push_features=[],
167184 )
168185 V5 = RoomVersion(
169186 "5",
182199 msc2716_redactions=False,
183200 msc3787_knock_restricted_join_rule=False,
184201 msc3667_int_only_power_levels=False,
202 msc3931_push_features=[],
185203 )
186204 V6 = RoomVersion(
187205 "6",
200218 msc2716_redactions=False,
201219 msc3787_knock_restricted_join_rule=False,
202220 msc3667_int_only_power_levels=False,
221 msc3931_push_features=[],
203222 )
204223 MSC2176 = RoomVersion(
205224 "org.matrix.msc2176",
218237 msc2716_redactions=False,
219238 msc3787_knock_restricted_join_rule=False,
220239 msc3667_int_only_power_levels=False,
240 msc3931_push_features=[],
221241 )
222242 V7 = RoomVersion(
223243 "7",
236256 msc2716_redactions=False,
237257 msc3787_knock_restricted_join_rule=False,
238258 msc3667_int_only_power_levels=False,
259 msc3931_push_features=[],
239260 )
240261 V8 = RoomVersion(
241262 "8",
254275 msc2716_redactions=False,
255276 msc3787_knock_restricted_join_rule=False,
256277 msc3667_int_only_power_levels=False,
278 msc3931_push_features=[],
257279 )
258280 V9 = RoomVersion(
259281 "9",
272294 msc2716_redactions=False,
273295 msc3787_knock_restricted_join_rule=False,
274296 msc3667_int_only_power_levels=False,
297 msc3931_push_features=[],
275298 )
276299 MSC3787 = RoomVersion(
277300 "org.matrix.msc3787",
290313 msc2716_redactions=False,
291314 msc3787_knock_restricted_join_rule=True,
292315 msc3667_int_only_power_levels=False,
316 msc3931_push_features=[],
293317 )
294318 V10 = RoomVersion(
295319 "10",
308332 msc2716_redactions=False,
309333 msc3787_knock_restricted_join_rule=True,
310334 msc3667_int_only_power_levels=True,
335 msc3931_push_features=[],
311336 )
312337 MSC2716v4 = RoomVersion(
313338 "org.matrix.msc2716v4",
326351 msc2716_redactions=True,
327352 msc3787_knock_restricted_join_rule=False,
328353 msc3667_int_only_power_levels=False,
354 msc3931_push_features=[],
355 )
356 MSC1767v10 = RoomVersion(
357 # MSC1767 (Extensible Events) based on room version "10"
358 "org.matrix.msc1767.10",
359 RoomDisposition.UNSTABLE,
360 EventFormatVersions.ROOM_V4_PLUS,
361 StateResolutionVersions.V2,
362 enforce_key_validity=True,
363 special_case_aliases_auth=False,
364 strict_canonicaljson=True,
365 limit_notifications_power_levels=True,
366 msc2176_redaction_rules=False,
367 msc3083_join_rules=True,
368 msc3375_redaction_rules=True,
369 msc2403_knocking=True,
370 msc2716_historical=False,
371 msc2716_redactions=False,
372 msc3787_knock_restricted_join_rule=True,
373 msc3667_int_only_power_levels=True,
374 msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS],
329375 )
330376
331377
265265 reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper()))
266266
267267
268 def listen_metrics(
269 bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool
270 ) -> None:
268 def listen_metrics(bind_addresses: Iterable[str], port: int) -> None:
271269 """
272270 Start Prometheus metrics server.
273271 """
274272 from prometheus_client import start_http_server as start_http_server_prometheus
275273
276 from synapse.metrics import (
277 RegistryProxy,
278 start_http_server as start_http_server_legacy,
279 )
274 from synapse.metrics import RegistryProxy
280275
281276 for host in bind_addresses:
282277 logger.info("Starting metrics listener on %s:%d", host, port)
283 if enable_legacy_metric_names:
284 start_http_server_legacy(port, addr=host, registry=RegistryProxy)
285 else:
286 _set_prometheus_client_use_created_metrics(False)
287 start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
278 _set_prometheus_client_use_created_metrics(False)
279 start_http_server_prometheus(port, addr=host, registry=RegistryProxy)
288280
289281
290282 def _set_prometheus_client_use_created_metrics(new_value: bool) -> None:
1313 # limitations under the License.
1414 import logging
1515 import sys
16 from typing import Dict, List, Optional, Tuple
17
18 from twisted.internet import address
16 from typing import Dict, List
17
1918 from twisted.web.resource import Resource
2019
2120 import synapse
2221 import synapse.events
23 from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError
2422 from synapse.api.urls import (
2523 CLIENT_API_PREFIX,
2624 FEDERATION_PREFIX,
4240 from synapse.config.server import ListenerConfig
4341 from synapse.federation.transport.server import TransportLayerServer
4442 from synapse.http.server import JsonResource, OptionsResource
45 from synapse.http.servlet import RestServlet, parse_json_object_from_request
46 from synapse.http.site import SynapseRequest
4743 from synapse.logging.context import LoggingContext
4844 from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy
4945 from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource
6965 versions,
7066 voip,
7167 )
72 from synapse.rest.client._base import client_patterns
7368 from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet
7469 from synapse.rest.client.devices import DevicesRestServlet
7570 from synapse.rest.client.keys import (
7671 KeyChangesServlet,
7772 KeyQueryServlet,
73 KeyUploadServlet,
7874 OneTimeKeyServlet,
7975 )
8076 from synapse.rest.client.register import (
131127 from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore
132128 from synapse.storage.databases.main.user_directory import UserDirectoryStore
133129 from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore
134 from synapse.types import JsonDict
135130 from synapse.util import SYNAPSE_VERSION
136131 from synapse.util.httpresourcetree import create_resource_tree
137132
138133 logger = logging.getLogger("synapse.app.generic_worker")
139
140
141 class KeyUploadServlet(RestServlet):
142 """An implementation of the `KeyUploadServlet` that responds to read only
143 requests, but otherwise proxies through to the master instance.
144 """
145
146 PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
147
148 def __init__(self, hs: HomeServer):
149 """
150 Args:
151 hs: server
152 """
153 super().__init__()
154 self.auth = hs.get_auth()
155 self.store = hs.get_datastores().main
156 self.http_client = hs.get_simple_http_client()
157 self.main_uri = hs.config.worker.worker_main_http_uri
158
159 async def on_POST(
160 self, request: SynapseRequest, device_id: Optional[str]
161 ) -> Tuple[int, JsonDict]:
162 requester = await self.auth.get_user_by_req(request, allow_guest=True)
163 user_id = requester.user.to_string()
164 body = parse_json_object_from_request(request)
165
166 if device_id is not None:
167 # passing the device_id here is deprecated; however, we allow it
168 # for now for compatibility with older clients.
169 if requester.device_id is not None and device_id != requester.device_id:
170 logger.warning(
171 "Client uploading keys for a different device "
172 "(logged in as %s, uploading for %s)",
173 requester.device_id,
174 device_id,
175 )
176 else:
177 device_id = requester.device_id
178
179 if device_id is None:
180 raise SynapseError(
181 400, "To upload keys, you must pass device_id when authenticating"
182 )
183
184 if body:
185 # They're actually trying to upload something, proxy to main synapse.
186
187 # Proxy headers from the original request, such as the auth headers
188 # (in case the access token is there) and the original IP /
189 # User-Agent of the request.
190 headers: Dict[bytes, List[bytes]] = {
191 header: list(request.requestHeaders.getRawHeaders(header, []))
192 for header in (b"Authorization", b"User-Agent")
193 }
194 # Add the previous hop to the X-Forwarded-For header.
195 x_forwarded_for = list(
196 request.requestHeaders.getRawHeaders(b"X-Forwarded-For", [])
197 )
198 # we use request.client here, since we want the previous hop, not the
199 # original client (as returned by request.getClientAddress()).
200 if isinstance(request.client, (address.IPv4Address, address.IPv6Address)):
201 previous_host = request.client.host.encode("ascii")
202 # If the header exists, add to the comma-separated list of the first
203 # instance of the header. Otherwise, generate a new header.
204 if x_forwarded_for:
205 x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host]
206 x_forwarded_for.extend(x_forwarded_for[1:])
207 else:
208 x_forwarded_for = [previous_host]
209 headers[b"X-Forwarded-For"] = x_forwarded_for
210
211 # Replicate the original X-Forwarded-Proto header. Note that
212 # XForwardedForRequest overrides isSecure() to give us the original protocol
213 # used by the client, as opposed to the protocol used by our upstream proxy
214 # - which is what we want here.
215 headers[b"X-Forwarded-Proto"] = [
216 b"https" if request.isSecure() else b"http"
217 ]
218
219 try:
220 result = await self.http_client.post_json_get_json(
221 self.main_uri + request.uri.decode("ascii"), body, headers=headers
222 )
223 except HttpResponseException as e:
224 raise e.to_synapse_error() from e
225 except RequestSendFailed as e:
226 raise SynapseError(502, "Failed to talk to master") from e
227
228 return 200, result
229 else:
230 # Just interested in counts.
231 result = await self.store.count_e2e_one_time_keys(user_id, device_id)
232 return 200, {"one_time_key_counts": result}
233134
234135
235136 class GenericWorkerSlavedStore(
418319 _base.listen_metrics(
419320 listener.bind_addresses,
420321 listener.port,
421 enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
422322 )
423323 else:
424324 logger.warning("Unsupported listener type: %s", listener.type)
264264 _base.listen_metrics(
265265 listener.bind_addresses,
266266 listener.port,
267 enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics,
268267 )
269268 else:
270269 # this shouldn't happen, as the listener type should have been checked
3131
3232 logger = logging.getLogger(__name__)
3333
34 # Type for the `device_one_time_key_counts` field in an appservice transaction
34 # Type for the `device_one_time_keys_count` field in an appservice transaction
3535 # user ID -> {device ID -> {algorithm -> count}}
36 TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]]
36 TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]]
3737
3838 # Type for the `device_unused_fallback_key_types` field in an appservice transaction
3939 # user ID -> {device ID -> [algorithm]}
375375 events: List[EventBase],
376376 ephemeral: List[JsonDict],
377377 to_device_messages: List[JsonDict],
378 one_time_key_counts: TransactionOneTimeKeyCounts,
378 one_time_keys_count: TransactionOneTimeKeysCount,
379379 unused_fallback_keys: TransactionUnusedFallbackKeys,
380380 device_list_summary: DeviceListUpdates,
381381 ):
384384 self.events = events
385385 self.ephemeral = ephemeral
386386 self.to_device_messages = to_device_messages
387 self.one_time_key_counts = one_time_key_counts
387 self.one_time_keys_count = one_time_keys_count
388388 self.unused_fallback_keys = unused_fallback_keys
389389 self.device_list_summary = device_list_summary
390390
401401 events=self.events,
402402 ephemeral=self.ephemeral,
403403 to_device_messages=self.to_device_messages,
404 one_time_key_counts=self.one_time_key_counts,
404 one_time_keys_count=self.one_time_keys_count,
405405 unused_fallback_keys=self.unused_fallback_keys,
406406 device_list_summary=self.device_list_summary,
407407 txn_id=self.id,
2222 from synapse.api.errors import CodeMessageException
2323 from synapse.appservice import (
2424 ApplicationService,
25 TransactionOneTimeKeyCounts,
25 TransactionOneTimeKeysCount,
2626 TransactionUnusedFallbackKeys,
2727 )
2828 from synapse.events import EventBase
261261 events: List[EventBase],
262262 ephemeral: List[JsonDict],
263263 to_device_messages: List[JsonDict],
264 one_time_key_counts: TransactionOneTimeKeyCounts,
264 one_time_keys_count: TransactionOneTimeKeysCount,
265265 unused_fallback_keys: TransactionUnusedFallbackKeys,
266266 device_list_summary: DeviceListUpdates,
267267 txn_id: Optional[int] = None,
309309
310310 # TODO: Update to stable prefixes once MSC3202 completes FCP merge
311311 if service.msc3202_transaction_extensions:
312 if one_time_key_counts:
312 if one_time_keys_count:
313313 body[
314314 "org.matrix.msc3202.device_one_time_key_counts"
315 ] = one_time_key_counts
315 ] = one_time_keys_count
316 body[
317 "org.matrix.msc3202.device_one_time_keys_count"
318 ] = one_time_keys_count
316319 if unused_fallback_keys:
317320 body[
318321 "org.matrix.msc3202.device_unused_fallback_key_types"
6363 from synapse.appservice import (
6464 ApplicationService,
6565 ApplicationServiceState,
66 TransactionOneTimeKeyCounts,
66 TransactionOneTimeKeysCount,
6767 TransactionUnusedFallbackKeys,
6868 )
6969 from synapse.appservice.api import ApplicationServiceApi
257257 ):
258258 return
259259
260 one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None
260 one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None
261261 unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None
262262
263263 if (
268268 # for the users which are mentioned in this transaction,
269269 # as well as the appservice's sender.
270270 (
271 one_time_key_counts,
271 one_time_keys_count,
272272 unused_fallback_keys,
273273 ) = await self._compute_msc3202_otk_counts_and_fallback_keys(
274274 service, events, ephemeral, to_device_messages_to_send
280280 events,
281281 ephemeral,
282282 to_device_messages_to_send,
283 one_time_key_counts,
283 one_time_keys_count,
284284 unused_fallback_keys,
285285 device_list_summary,
286286 )
295295 events: Iterable[EventBase],
296296 ephemerals: Iterable[JsonDict],
297297 to_device_messages: Iterable[JsonDict],
298 ) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]:
298 ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]:
299299 """
300300 Given a list of the events, ephemeral messages and to-device messages,
301301 - first computes a list of application services users that may have
366366 events: List[EventBase],
367367 ephemeral: Optional[List[JsonDict]] = None,
368368 to_device_messages: Optional[List[JsonDict]] = None,
369 one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None,
369 one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None,
370370 unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None,
371371 device_list_summary: Optional[DeviceListUpdates] = None,
372372 ) -> None:
379379 events: The persistent events to include in the transaction.
380380 ephemeral: The ephemeral events to include in the transaction.
381381 to_device_messages: The to-device messages to include in the transaction.
382 one_time_key_counts: Counts of remaining one-time keys for relevant
382 one_time_keys_count: Counts of remaining one-time keys for relevant
383383 appservice devices in the transaction.
384384 unused_fallback_keys: Lists of unused fallback keys for relevant
385385 appservice devices in the transaction.
396396 events=events,
397397 ephemeral=ephemeral or [],
398398 to_device_messages=to_device_messages or [],
399 one_time_key_counts=one_time_key_counts or {},
399 one_time_keys_count=one_time_keys_count or {},
400400 unused_fallback_keys=unused_fallback_keys or {},
401401 device_list_summary=device_list_summary or DeviceListUpdates(),
402402 )
1515
1616 import attr
1717
18 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
1819 from synapse.config._base import Config
1920 from synapse.types import JsonDict
2021
5152
5253 # MSC3266 (room summary api)
5354 self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False)
54
55 # MSC3030 (Jump to date API endpoint)
56 self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False)
5755
5856 # MSC2409 (this setting only relates to optionally sending to-device messages).
5957 # Presence, typing and read receipt EDUs are already sent to application services that
130128
131129 # MSC3912: Relation-based redactions.
132130 self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False)
131
132 # MSC1767 and friends: Extensible Events
133 self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False)
134 if self.msc1767_enabled:
135 # Enable room version (and thus applicable push rules from MSC3931/3932)
136 version_id = RoomVersions.MSC1767v10.identifier
137 KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10
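Editor's sketch (assuming the usual homeserver.yaml layout, where these options live under experimental_features): enabling MSC1767 support, which registers the org.matrix.msc1767.10 room version as above, corresponds to a parsed config of:

    config = {"experimental_features": {"msc1767_enabled": True}}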
316316 Set up the logging subsystem.
317317
318318 Args:
319 config (LoggingConfig | synapse.config.worker.WorkerConfig):
320 configuration data
321
322 use_worker_options (bool): True to use the 'worker_log_config' option
319 config: configuration data
320
321 use_worker_options: True to use the 'worker_log_config' option
323322 instead of 'log_config'.
324323
325324 logBeginner: The Twisted logBeginner to use.
4242 def read_config(self, config: JsonDict, **kwargs: Any) -> None:
4343 self.enable_metrics = config.get("enable_metrics", False)
4444
45 self.enable_legacy_metrics = config.get("enable_legacy_metrics", False)
46
4745 self.report_stats = config.get("report_stats", None)
4846 self.report_stats_endpoint = config.get(
4947 "report_stats_endpoint", "https://matrix.org/report-usage-stats/push"
149149
150150 self.rc_third_party_invite = RatelimitSettings(
151151 config.get("rc_third_party_invite", {}),
152 defaults={
153 "per_second": self.rc_message.per_second,
154 "burst_count": self.rc_message.burst_count,
155 },
152 defaults={"per_second": 0.0025, "burst_count": 5},
156153 )
2828 )
2929 from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def
3030
31 _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """
32 The send_federation config option must be disabled in the main
33 synapse process before they can be run in a separate worker.
34
35 Please add ``send_federation: false`` to the main config
36 """
37
38 _PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """
39 The start_pushers config option must be disabled in the main
40 synapse process before they can be run in a separate worker.
41
42 Please add ``start_pushers: false`` to the main config
43 """
44
4531 _DEPRECATED_WORKER_DUTY_OPTION_USED = """
4632 The '%s' configuration option is deprecated and will be removed in a future
4733 Synapse version. Please use ``%s: name_of_worker`` instead.
161147 self.worker_name = config.get("worker_name", self.worker_app)
162148 self.instance_name = self.worker_name or "master"
163149
150 # FIXME: Remove this check after a suitable amount of time.
164151 self.worker_main_http_uri = config.get("worker_main_http_uri", None)
152 if self.worker_main_http_uri is not None:
153 logger.warning(
154 "The config option worker_main_http_uri is unused since Synapse 1.73. "
155 "It can be safely removed from your configuration."
156 )
165157
166158 # This option is really only here to support `--manhole` command line
167159 # argument.
175167 )
176168 )
177169
178 # Handle federation sender configuration.
179 #
180 # There are two ways of configuring which instances handle federation
181 # sending:
182 # 1. The old way where "send_federation" is set to false and running a
183 # `synapse.app.federation_sender` worker app.
184 # 2. Specifying the workers sending federation in
185 # `federation_sender_instances`.
186 #
187
188 send_federation = config.get("send_federation", True)
189
190 federation_sender_instances = config.get("federation_sender_instances")
191 if federation_sender_instances is None:
192 # Default to an empty list, which means "another, unknown, worker is
193 # responsible for it".
194 federation_sender_instances = []
195
196 # If no federation sender instances are set we check if
197 # `send_federation` is set, which means use master
198 if send_federation:
199 federation_sender_instances = ["master"]
200
201 if self.worker_app == "synapse.app.federation_sender":
202 if send_federation:
203 # If we're running federation senders, and not using
204 # `federation_sender_instances`, then we should have
205 # explicitly set `send_federation` to false.
206 raise ConfigError(
207 _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR
208 )
209
210 federation_sender_instances = [self.worker_name]
211
170 federation_sender_instances = self._worker_names_performing_this_duty(
171 config,
172 "send_federation",
173 "synapse.app.federation_sender",
174 "federation_sender_instances",
175 )
212176 self.send_federation = self.instance_name in federation_sender_instances
213177 self.federation_shard_config = ShardedWorkerHandlingConfig(
214178 federation_sender_instances
275239 )
276240
277241 # Handle sharded push
278 start_pushers = config.get("start_pushers", True)
279 pusher_instances = config.get("pusher_instances")
280 if pusher_instances is None:
281 # Default to an empty list, which means "another, unknown, worker is
282 # responsible for it".
283 pusher_instances = []
284
285 # If no pushers instances are set we check if `start_pushers` is
286 # set, which means use master
287 if start_pushers:
288 pusher_instances = ["master"]
289
290 if self.worker_app == "synapse.app.pusher":
291 if start_pushers:
292 # If we're running pushers, and not using
293 # `pusher_instances`, then we should have explicitly set
294 # `start_pushers` to false.
295 raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR)
296
297 pusher_instances = [self.instance_name]
298
242 pusher_instances = self._worker_names_performing_this_duty(
243 config,
244 "start_pushers",
245 "synapse.app.pusher",
246 "pusher_instances",
247 )
299248 self.start_pushers = self.instance_name in pusher_instances
300249 self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances)
301250
418367 # (By this point, these are either the same value or only one is not None.)
419368 return bool(new_option_should_run_here or legacy_option_should_run_here)
420369
370 def _worker_names_performing_this_duty(
371 self,
372 config: Dict[str, Any],
373 legacy_option_name: str,
374 legacy_app_name: str,
375 modern_instance_list_name: str,
376 ) -> List[str]:
377 """
378 Retrieves the names of the workers handling a given duty, by either legacy
379 option or instance list.
380
381 There are two ways of configuring which instances handle a given duty, e.g.
382 for configuring pushers:
383
384 1. The old way where "start_pushers" is set to false and running a
385 `synapse.app.pusher` worker app.
386 2. Specifying the workers handling pushers in `pusher_instances`.
387
388 Args:
389 config: settings read from yaml.
390 legacy_option_name: the old way of enabling options. e.g. 'start_pushers'
391 legacy_app_name: The historical app name. e.g. 'synapse.app.pusher'
392 modern_instance_list_name: the string name of the new instance_list. e.g.
393 'pusher_instances'
394
395 Returns:
396 A list of worker instance names handling the given duty.
397 """
398
399 legacy_option = config.get(legacy_option_name, True)
400
401 worker_instances = config.get(modern_instance_list_name)
402 if worker_instances is None:
403 # Default to an empty list, which means "another, unknown, worker is
404 # responsible for it".
405 worker_instances = []
406
407 # If no worker instances are set we check if the legacy option
408 # is set, which means use the main process.
409 if legacy_option:
410 worker_instances = ["master"]
411
412 if self.worker_app == legacy_app_name:
413 if legacy_option:
414 # If we're using `legacy_app_name`, and not using
415 # `modern_instance_list_name`, then we should have
416 # explicitly set `legacy_option_name` to false.
417 raise ConfigError(
418 f"The '{legacy_option_name}' config option must be disabled in "
419 "the main synapse process before they can be run in a separate "
420 "worker.\n"
421 f"Please add `{legacy_option_name}: false` to the main config.\n",
422 )
423
424 worker_instances = [self.worker_name]
425
426 return worker_instances
427
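Editor's sketch (worker name is an example): the two equivalent ways of assigning the federation-sending duty that this helper resolves, expressed as the parsed config dicts it receives:

    legacy_style = {"send_federation": False}   # plus a synapse.app.federation_sender worker
    modern_style = {"federation_sender_instances": ["federation_sender1"]}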
421428 def read_arguments(self, args: argparse.Namespace) -> None:
422429 # We support a bunch of command line arguments that override options in
423430 # the config. A lot of these options have a worker_* prefix when running
212212
213213 def verify_json_objects_for_server(
214214 self, server_and_json: Iterable[Tuple[str, dict, int]]
215 ) -> List[defer.Deferred]:
215 ) -> List["defer.Deferred[None]"]:
216216 """Bulk verifies signatures of json objects, bulk fetching keys as
217217 necessary.
218218
225225 valid.
226226
227227 Returns:
228 List<Deferred[None]>: for each input triplet, a deferred indicating success
229 or failure to verify each json object's signature for the given
230 server_name. The deferreds run their callbacks in the sentinel
231 logcontext.
228 For each input triplet, a deferred indicating success or failure to
229 verify each json object's signature for the given server_name. The
230 deferreds run their callbacks in the sentinel logcontext.
232231 """
233232 return [
234233 run_in_background(
857856 response = await self.client.get_json(
858857 destination=server_name,
859858 path="/_matrix/key/v2/server/"
860 + urllib.parse.quote(requested_key_id),
859 + urllib.parse.quote(requested_key_id, safe=""),
861860 ignore_backoff=True,
862861 # we only give the remote server 10s to respond. It should be an
863862 # easy request to handle, so if it doesn't reply within 10s, it's
596596 format_version: The event format version
597597
598598 Returns:
599 type: A type that can be initialized as per the initializer of
600 `FrozenEvent`
599 A type that can be initialized as per the initializer of `FrozenEvent`
601600 """
602601
603602 if format_version == EventFormatVersions.ROOM_V1_V2:
127127 state_filter=StateFilter.from_types(
128128 auth_types_for_event(self.room_version, self)
129129 ),
130 await_full_state=False,
130131 )
131132 auth_event_ids = self._event_auth_handler.compute_auth_events(
132133 self, state_ids
16901690 # to return events on *both* sides of the timestamp to
16911691 # help reconcile the gap faster.
16921692 _timestamp_to_event_from_destination,
1693 # Since this endpoint is new, we should try other servers before giving up.
1694 # We can safely remove this in a year (remove after 2023-11-16).
1695 failover_on_unknown_endpoint=True,
16931696 )
16941697 return timestamp_to_event_response
1695 except SynapseError:
1698 except SynapseError as e:
1699 logger.warning(
1700 "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s",
1701 room_id,
1702 timestamp,
1703 direction,
1704 e,
1705 )
16961706 return None
16971707
16981708 async def _timestamp_to_event_from_destination(
433433 # If there are no prev event IDs then the state is empty
434434 # and so no remote servers in the room
435435 destinations = set()
436 else:
436
437 if destinations is None:
438 # During partial join we use the set of servers that we got
439 # when beginning the join. It's still possible that we send
440 # events to servers that left the room in the meantime, but
441 # we consider that an acceptable risk since it is only our own
442 # events that we leak and not other servers' ones.
443 partial_state_destinations = (
444 await self.store.get_partial_state_servers_at_join(
445 event.room_id
446 )
447 )
448
449 if len(partial_state_destinations) > 0:
450 destinations = partial_state_destinations
451
452 if destinations is None:
437453 # We check the external cache for the destinations, which is
438454 # stored per state group.
439455
3434 from synapse.logging.opentracing import SynapseTags, set_tag
3535 from synapse.metrics import sent_transactions_counter
3636 from synapse.metrics.background_process_metrics import run_as_background_process
37 from synapse.types import ReadReceipt
37 from synapse.types import JsonDict, ReadReceipt
3838 from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter
3939 from synapse.visibility import filter_events_for_server
4040
135135 # destination
136136 self._pending_presence: Dict[str, UserPresenceState] = {}
137137
138 # room_id -> receipt_type -> user_id -> receipt_dict
139 self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {}
138 # List of room_id -> receipt_type -> user_id -> receipt_dict,
139 #
140 # Each receipt can only have a single receipt per
141 # (room ID, receipt type, user ID, thread ID) tuple.
142 self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = []
140143 self._rrs_pending_flush = False
141144
142145 # stream_id of last successfully sent to-device message.
201204 Args:
202205 receipt: receipt to be queued
203206 """
204 self._pending_rrs.setdefault(receipt.room_id, {}).setdefault(
205 receipt.receipt_type, {}
206 )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data}
207 serialized_receipt: JsonDict = {
208 "event_ids": receipt.event_ids,
209 "data": receipt.data,
210 }
211 if receipt.thread_id is not None:
212 serialized_receipt["data"]["thread_id"] = receipt.thread_id
213
214 # Find which EDU to add this receipt to. There are three situations depending
215 # on the (room ID, receipt type, user, thread ID) tuple:
216 #
217 # 1. If it fully matches, clobber the information.
218 # 2. If it is missing, add the information.
219 # 3. If the subset tuple of (room ID, receipt type, user) matches, check
220 # the next EDU (or add a new EDU).
221 for edu in self._pending_receipt_edus:
222 receipt_content = edu.setdefault(receipt.room_id, {}).setdefault(
223 receipt.receipt_type, {}
224 )
225 # If this room ID, receipt type, user ID is not in this EDU, OR if
226 # the full tuple matches, use the current EDU.
227 if (
228 receipt.user_id not in receipt_content
229 or receipt_content[receipt.user_id].get("thread_id")
230 == receipt.thread_id
231 ):
232 receipt_content[receipt.user_id] = serialized_receipt
233 break
234
235 # If no matching EDU was found, create a new one.
236 else:
237 self._pending_receipt_edus.append(
238 {
239 receipt.room_id: {
240 receipt.receipt_type: {receipt.user_id: serialized_receipt}
241 }
242 }
243 )
207244
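Editor's illustration (room, user, event and timestamp values are examples): one entry of _pending_receipt_edus after queueing a threaded read receipt, following the room_id -> receipt_type -> user_id -> receipt_dict shape described above:

    pending_receipt_edus = [
        {
            "!room:example.org": {
                "m.read": {
                    "@alice:example.org": {
                        "event_ids": ["$event_id"],
                        "data": {"ts": 1670000000000, "thread_id": "$thread_root"},
                    }
                }
            }
        }
    ]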
208245 def flush_read_receipts_for_room(self, room_id: str) -> None:
209 # if we don't have any read-receipts for this room, it may be that we've already
210 # sent them out, so we don't need to flush.
211 if room_id not in self._pending_rrs:
212 return
213 self._rrs_pending_flush = True
214 self.attempt_new_transaction()
246 # If there are any pending receipts for this room then force-flush them
247 # in a new transaction.
248 for edu in self._pending_receipt_edus:
249 if room_id in edu:
250 self._rrs_pending_flush = True
251 self.attempt_new_transaction()
252 # No use in checking remaining EDUs if the room was found.
253 break
215254
216255 def send_keyed_edu(self, edu: Edu, key: Hashable) -> None:
217256 self._pending_edus_keyed[(edu.edu_type, key)] = edu
350389 self._pending_edus = []
351390 self._pending_edus_keyed = {}
352391 self._pending_presence = {}
353 self._pending_rrs = {}
392 self._pending_receipt_edus = []
354393
355394 self._start_catching_up()
356395 except FederationDeniedError as e:
504543 new_pdus = await filter_events_for_server(
505544 self._storage_controllers,
506545 self._destination,
546 self._server_name,
507547 new_pdus,
508548 redact=False,
509549 )
541581 self._destination, last_successful_stream_ordering
542582 )
543583
544 def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]:
545 if not self._pending_rrs:
584 def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]:
585 if not self._pending_receipt_edus:
546586 return
547587 if not force_flush and not self._rrs_pending_flush:
548588 # not yet time for this lot
549589 return
550590
551 edu = Edu(
552 origin=self._server_name,
553 destination=self._destination,
554 edu_type=EduTypes.RECEIPT,
555 content=self._pending_rrs,
556 )
557 self._pending_rrs = {}
558 self._rrs_pending_flush = False
559 yield edu
591 # Send at most limit EDUs for receipts.
592 for content in self._pending_receipt_edus[:limit]:
593 yield Edu(
594 origin=self._server_name,
595 destination=self._destination,
596 edu_type=EduTypes.RECEIPT,
597 content=content,
598 )
599 self._pending_receipt_edus = self._pending_receipt_edus[limit:]
600
601 # If there are still pending read-receipts, don't reset the pending flush
602 # flag.
603 if not self._pending_receipt_edus:
604 self._rrs_pending_flush = False
560605
561606 def _pop_pending_edus(self, limit: int) -> List[Edu]:
562607 pending_edus = self._pending_edus
643688 async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]:
644689 # First we calculate the EDUs we want to send, if any.
645690
646 # We start by fetching device related EDUs, i.e device updates and to
647 # device messages. We have to keep 2 free slots for presence and rr_edus.
648 device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2
649
650 # We prioritize to-device messages so that existing encryption channels
651 # work. We also keep a few slots spare (by reducing the limit) so that
652 # we can still trickle out some device list updates.
653 (
654 to_device_edus,
655 device_stream_id,
656 ) = await self.queue._get_to_device_message_edus(device_edu_limit - 10)
657
658 if to_device_edus:
659 self._device_stream_id = device_stream_id
660 else:
661 self.queue._last_device_stream_id = device_stream_id
662
663 device_edu_limit -= len(to_device_edus)
664
665 device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
666 device_edu_limit
667 )
668
669 if device_update_edus:
670 self._device_list_id = dev_list_id
671 else:
672 self.queue._last_device_list_stream_id = dev_list_id
673
674 pending_edus = device_update_edus + to_device_edus
675
676 # Now add the read receipt EDU.
677 pending_edus.extend(self.queue._get_rr_edus(force_flush=False))
678
679 # And presence EDU.
691 # There's a maximum number of EDUs that can be sent with a transaction,
692 # generally device updates and to-device messages get priority, but we
693 # want to ensure that there's room for some other EDUs as well.
694 #
695 # This is done by:
696 #
697 # * Add a presence EDU, if one exists.
698 # * Add up-to a small limit of read receipt EDUs.
699 # * Add to-device EDUs, but leave some space for device list updates.
700 # * Add device list updates EDUs.
701 # * If there's any remaining room, add other EDUs.
702 pending_edus = []
703
704 # Add presence EDU.
680705 if self.queue._pending_presence:
681706 pending_edus.append(
682707 Edu(
695720 )
696721 self.queue._pending_presence = {}
697722
723 # Add read receipt EDUs.
724 pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5))
725 edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus)
726
727 # Next, prioritize to-device messages so that existing encryption channels
728 # work. We also keep a few slots spare (by reducing the limit) so that
729 # we can still trickle out some device list updates.
730 (
731 to_device_edus,
732 device_stream_id,
733 ) = await self.queue._get_to_device_message_edus(edu_limit - 10)
734
735 if to_device_edus:
736 self._device_stream_id = device_stream_id
737 else:
738 self.queue._last_device_stream_id = device_stream_id
739
740 pending_edus.extend(to_device_edus)
741 edu_limit -= len(to_device_edus)
742
743 # Add device list update EDUs.
744 device_update_edus, dev_list_id = await self.queue._get_device_update_edus(
745 edu_limit
746 )
747
748 if device_update_edus:
749 self._device_list_id = dev_list_id
750 else:
751 self.queue._last_device_list_stream_id = dev_list_id
752
753 pending_edus.extend(device_update_edus)
754 edu_limit -= len(device_update_edus)
755
698756 # Finally add any other types of EDUs if there is room.
699 pending_edus.extend(
700 self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus))
701 )
702 while (
703 len(pending_edus) < MAX_EDUS_PER_TRANSACTION
704 and self.queue._pending_edus_keyed
705 ):
757 other_edus = self.queue._pop_pending_edus(edu_limit)
758 pending_edus.extend(other_edus)
759 edu_limit -= len(other_edus)
760 while edu_limit > 0 and self.queue._pending_edus_keyed:
706761 _, val = self.queue._pending_edus_keyed.popitem()
707762 pending_edus.append(val)
763 edu_limit -= 1
708764
709765 # Now we look for any PDUs to send, by getting up to 50 PDUs from the
710766 # queue
715771
716772 # if we've decided to send a transaction anyway, and we have room, we
717773 # may as well send any pending RRs
718 if len(pending_edus) < MAX_EDUS_PER_TRANSACTION:
719 pending_edus.extend(self.queue._get_rr_edus(force_flush=True))
774 if edu_limit:
775 pending_edus.extend(
776 self.queue._get_receipt_edus(force_flush=True, limit=edu_limit)
777 )
720778
721779 if self._pdus:
722780 self._last_stream_ordering = self._pdus[
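Editor's sketch of the budget arithmetic above (assuming MAX_EDUS_PER_TRANSACTION is 100, as defined earlier in this module):

    edu_limit = 100
    edu_limit -= 1   # up to one presence EDU
    edu_limit -= 5   # up to five read-receipt EDUs (limit=5 above)
    # to-device messages are then fetched with edu_limit - 10, keeping roughly ten
    # slots free for device list updates; device list updates, other EDUs and keyed
    # EDUs consume the rest, and any leftover room force-flushes pending receipts.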
184184 Raises:
185185 Various exceptions when the request fails
186186 """
187 path = _create_path(
188 FEDERATION_UNSTABLE_PREFIX,
189 "/org.matrix.msc3030/timestamp_to_event/%s",
187 path = _create_v1_path(
188 "/timestamp_to_event/%s",
190189 room_id,
191190 )
192191
279278 Note that this does not append any events to any graphs.
280279
281280 Args:
282 destination (str): address of remote homeserver
283 room_id (str): room to join/leave
284 user_id (str): user to be joined/left
285 membership (str): one of join/leave
286 params (dict[str, str|Iterable[str]]): Query parameters to include in the
287 request.
281 destination: address of remote homeserver
282 room_id: room to join/leave
283 user_id: user to be joined/left
284 membership: one of join/leave
285 params: Query parameters to include in the request.
288286
289287 Returns:
290288 Succeeds when we get a 2xx HTTP response. The result
2424 from synapse.federation.transport.server.federation import (
2525 FEDERATION_SERVLET_CLASSES,
2626 FederationAccountStatusServlet,
27 FederationTimestampLookupServlet,
2827 )
2928 from synapse.http.server import HttpServer, JsonResource
3029 from synapse.http.servlet import (
290289 )
291290
292291 for servletclass in SERVLET_GROUPS[servlet_group]:
293 # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled
294 if (
295 servletclass == FederationTimestampLookupServlet
296 and not hs.config.experimental.msc3030_enabled
297 ):
298 continue
299
300292 # Only allow the `/account_status` servlet if msc3720 is enabled
301293 if (
302294 servletclass == FederationAccountStatusServlet
223223
224224 With arguments:
225225
226 origin (unicode|None): The authenticated server_name of the calling server,
226 origin (str|None): The authenticated server_name of the calling server,
227227 unless REQUIRE_AUTH is set to False and authentication failed.
228228
229 content (unicode|None): decoded json body of the request. None if the
229 content (str|None): decoded json body of the request. None if the
230230 request was a GET.
231231
232232 query (dict[bytes, list[bytes]]): Query params from the request. url-decoded
217217 `dir` can be `f` or `b` to indicate forwards and backwards in time from the
218218 given timestamp.
219219
220 GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
220 GET /_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>
221221 {
222222 "event_id": ...
223223 }
224224 """
225225
226226 PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?"
227 PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030"
228227
229228 async def on_GET(
230229 self,
1515 from typing import TYPE_CHECKING, Optional
1616
1717 from synapse.api.errors import SynapseError
18 from synapse.handlers.device import DeviceHandler
1819 from synapse.metrics.background_process_metrics import run_as_background_process
1920 from synapse.types import Codes, Requester, UserID, create_requester
2021
7475 Returns:
7576 True if identity server supports removing threepids, otherwise False.
7677 """
78
79 # This can only be called on the main process.
80 assert isinstance(self._device_handler, DeviceHandler)
7781
7882 # Check if this user can be deactivated
7983 if not await self._third_party_rules.check_can_deactivate_user(
6464
6565
6666 class DeviceWorkerHandler:
67 device_list_updater: "DeviceListWorkerUpdater"
68
6769 def __init__(self, hs: "HomeServer"):
6870 self.clock = hs.get_clock()
6971 self.hs = hs
7577 self.server_name = hs.hostname
7678 self._msc3852_enabled = hs.config.experimental.msc3852_enabled
7779
80 self.device_list_updater = DeviceListWorkerUpdater(hs)
81
7882 @trace
7983 async def get_devices_by_user(self, user_id: str) -> List[JsonDict]:
8084 """
97101
98102 log_kv(device_map)
99103 return devices
104
105 async def get_dehydrated_device(
106 self, user_id: str
107 ) -> Optional[Tuple[str, JsonDict]]:
108 """Retrieve the information for a dehydrated device.
109
110 Args:
111 user_id: the user whose dehydrated device we are looking for
112 Returns:
113 a tuple whose first item is the device ID, and the second item is
114 the dehydrated device information
115 """
116 return await self.store.get_dehydrated_device(user_id)
100117
101118 @trace
102119 async def get_device(self, user_id: str, device_id: str) -> JsonDict:
126143 @cancellable
127144 async def get_device_changes_in_shared_rooms(
128145 self, user_id: str, room_ids: Collection[str], from_token: StreamToken
129 ) -> Collection[str]:
146 ) -> Set[str]:
130147 """Get the set of users whose devices have changed who share a room with
131148 the given user.
132149 """
319336
320337
321338 class DeviceHandler(DeviceWorkerHandler):
339 device_list_updater: "DeviceListUpdater"
340
322341 def __init__(self, hs: "HomeServer"):
323342 super().__init__(hs)
324343
605624 await self.delete_devices(user_id, [old_device_id])
606625 return device_id
607626
608 async def get_dehydrated_device(
609 self, user_id: str
610 ) -> Optional[Tuple[str, JsonDict]]:
611 """Retrieve the information for a dehydrated device.
612
613 Args:
614 user_id: the user whose dehydrated device we are looking for
615 Returns:
616 a tuple whose first item is the device ID, and the second item is
617 the dehydrated device information
618 """
619 return await self.store.get_dehydrated_device(user_id)
620
621627 async def rehydrate_device(
622628 self, user_id: str, access_token: str, device_id: str
623629 ) -> dict:
681687 hosts_already_sent_to: Set[str] = set()
682688
683689 try:
690 stream_id, room_id = await self.store.get_device_change_last_converted_pos()
691
684692 while True:
685693 self._handle_new_device_update_new_data = False
686 rows = await self.store.get_uncoverted_outbound_room_pokes()
694 max_stream_id = self.store.get_device_stream_token()
695 rows = await self.store.get_uncoverted_outbound_room_pokes(
696 stream_id, room_id
697 )
687698 if not rows:
688699 # If the DB returned nothing then there is nothing left to
689700 # do, *unless* a new device list update happened during the
690701 # DB query.
702
703 # Advance `(stream_id, room_id)`.
704 # `max_stream_id` comes from *before* the query for unconverted
705 # rows, which means that any unconverted rows must have a larger
706 # stream ID.
707 if max_stream_id > stream_id:
708 stream_id, room_id = max_stream_id, ""
709 await self.store.set_device_change_last_converted_pos(
710 stream_id, room_id
711 )
712 else:
713 assert max_stream_id == stream_id
714 # Avoid moving `room_id` backwards.
715 pass
716
691717 if self._handle_new_device_update_new_data:
692718 continue
693719 else:
717743 user_id=user_id,
718744 device_id=device_id,
719745 room_id=room_id,
720 stream_id=stream_id,
721746 hosts=hosts,
722747 context=opentracing_context,
723748 )
751776 hosts_already_sent_to.update(hosts)
752777 current_stream_id = stream_id
753778
779 # Advance `(stream_id, room_id)`.
780 _, _, room_id, stream_id, _ = rows[-1]
781 await self.store.set_device_change_last_converted_pos(
782 stream_id, room_id
783 )
784
754785 finally:
755786 self._handle_new_device_update_is_processing = False
756787
833864 user_id=user_id,
834865 device_id=device_id,
835866 room_id=room_id,
836 stream_id=None,
837867 hosts=potentially_changed_hosts,
838868 context=None,
839869 )
857887 )
858888
859889
860 class DeviceListUpdater:
890 class DeviceListWorkerUpdater:
891 "Handles incoming device list updates from federation and contacts the main process over replication"
892
893 def __init__(self, hs: "HomeServer"):
894 from synapse.replication.http.devices import (
895 ReplicationUserDevicesResyncRestServlet,
896 )
897
898 self._user_device_resync_client = (
899 ReplicationUserDevicesResyncRestServlet.make_client(hs)
900 )
901
902 async def user_device_resync(
903 self, user_id: str, mark_failed_as_stale: bool = True
904 ) -> Optional[JsonDict]:
905 """Fetches all devices for a user and updates the device cache with them.
906
907 Args:
908 user_id: The user's id whose device_list will be updated.
909 mark_failed_as_stale: Whether to mark the user's device list as stale
910 if the attempt to resync failed.
911 Returns:
912 A dict with device info as under the "devices" in the result of this
913 request:
914 https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid
915 """
916 return await self._user_device_resync_client(user_id=user_id)
917
918
919 class DeviceListUpdater(DeviceListWorkerUpdater):
861920 "Handles incoming device list updates from federation and updates the DB"
862921
863922 def __init__(self, hs: "HomeServer", device_handler: DeviceHandler):
2626
2727 from synapse.api.constants import EduTypes
2828 from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError
29 from synapse.handlers.device import DeviceHandler
2930 from synapse.logging.context import make_deferred_yieldable, run_in_background
3031 from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace
31 from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet
3232 from synapse.types import (
3333 JsonDict,
3434 UserID,
5555 self.is_mine = hs.is_mine
5656 self.clock = hs.get_clock()
5757
58 self._edu_updater = SigningKeyEduUpdater(hs, self)
59
6058 federation_registry = hs.get_federation_registry()
6159
62 self._is_master = hs.config.worker.worker_app is None
63 if not self._is_master:
64 self._user_device_resync_client = (
65 ReplicationUserDevicesResyncRestServlet.make_client(hs)
66 )
67 else:
60 is_master = hs.config.worker.worker_app is None
61 if is_master:
62 edu_updater = SigningKeyEduUpdater(hs)
63
6864 # Only register this edu handler on master as it requires writing
6965 # device updates to the db
7066 federation_registry.register_edu_handler(
7167 EduTypes.SIGNING_KEY_UPDATE,
72 self._edu_updater.incoming_signing_key_update,
68 edu_updater.incoming_signing_key_update,
7369 )
7470 # also handle the unstable version
7571 # FIXME: remove this when enough servers have upgraded
7672 federation_registry.register_edu_handler(
7773 EduTypes.UNSTABLE_SIGNING_KEY_UPDATE,
78 self._edu_updater.incoming_signing_key_update,
74 edu_updater.incoming_signing_key_update,
7975 )
8076
8177 # doesn't really work as part of the generic query API, because the
318314 # probably be tracking their device lists. However, we haven't
319315 # done an initial sync on the device list so we do it now.
320316 try:
321 if self._is_master:
322 resync_results = await self.device_handler.device_list_updater.user_device_resync(
317 resync_results = (
318 await self.device_handler.device_list_updater.user_device_resync(
323319 user_id
324320 )
325 else:
326 resync_results = await self._user_device_resync_client(
327 user_id=user_id
328 )
321 )
322 if resync_results is None:
323 raise ValueError("Device resync failed")
329324
330325 # Add the device keys to the results.
331326 user_devices = resync_results["devices"]
604599 async def upload_keys_for_user(
605600 self, user_id: str, device_id: str, keys: JsonDict
606601 ) -> JsonDict:
602 # This can only be called from the main process.
603 assert isinstance(self.device_handler, DeviceHandler)
607604
608605 time_now = self.clock.time_msec()
609606
731728 user_id: the user uploading the keys
732729 keys: the signing keys
733730 """
731 # This can only be called from the main process.
732 assert isinstance(self.device_handler, DeviceHandler)
734733
735734 # if a master key is uploaded, then check it. Otherwise, load the
736735 # stored master key, to check signatures on other keys
822821 Raises:
823822 SynapseError: if the signatures dict is not valid.
824823 """
824 # This can only be called from the main process.
825 assert isinstance(self.device_handler, DeviceHandler)
826
825827 failures = {}
826828
827829 # signatures to be stored. Each item will be a SignatureListItem
869871 - signatures of the user's master key by the user's devices.
870872
871873 Args:
872 user_id (string): the user uploading the keys
874 user_id: the user uploading the keys
873875 signatures (dict[string, dict]): map of devices to signed keys
874876
875877 Returns:
11991201 A tuple of the retrieved key content, the key's ID and the matching VerifyKey.
12001202 If the key cannot be retrieved, all values in the tuple will instead be None.
12011203 """
1204 # This can only be called from the main process.
1205 assert isinstance(self.device_handler, DeviceHandler)
1206
12021207 try:
12031208 remote_result = await self.federation.query_user_devices(
12041209 user.domain, user.to_string()
13951400 class SigningKeyEduUpdater:
13961401 """Handles incoming signing key updates from federation and updates the DB"""
13971402
1398 def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler):
1403 def __init__(self, hs: "HomeServer"):
13991404 self.store = hs.get_datastores().main
14001405 self.federation = hs.get_federation_client()
14011406 self.clock = hs.get_clock()
1402 self.e2e_keys_handler = e2e_keys_handler
1407
1408 device_handler = hs.get_device_handler()
1409 assert isinstance(device_handler, DeviceHandler)
1410 self._device_handler = device_handler
14031411
14041412 self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
14051413
14441452 user_id: the user whose updates we are processing
14451453 """
14461454
1447 device_handler = self.e2e_keys_handler.device_handler
1448 device_list_updater = device_handler.device_list_updater
1449
14501455 async with self._remote_edu_linearizer.queue(user_id):
14511456 pending_updates = self._pending_updates.pop(user_id, [])
14521457 if not pending_updates:
14581463 logger.info("pending updates: %r", pending_updates)
14591464
14601465 for master_key, self_signing_key in pending_updates:
1461 new_device_ids = (
1462 await device_list_updater.process_cross_signing_key_update(
1463 user_id,
1464 master_key,
1465 self_signing_key,
1466 )
1466 new_device_ids = await self._device_handler.device_list_updater.process_cross_signing_key_update(
1467 user_id,
1468 master_key,
1469 self_signing_key,
14671470 )
14681471 device_ids = device_ids + new_device_ids
14691472
1470 await device_handler.notify_device_update(user_id, device_ids)
1473 await self._device_handler.notify_device_update(user_id, device_ids)
376376 """Deletes a given version of the user's e2e_room_keys backup
377377
378378 Args:
379 user_id(str): the user whose current backup version we're deleting
380 version(str): the version id of the backup being deleted
379 user_id: the user whose current backup version we're deleting
380 version: Optional. The version ID of the backup version we're deleting.
381 If missing, we delete the current backup version info.
381382 Raises:
382383 NotFoundError: if this backup version doesn't exist
383384 """
4444 def __init__(self, hs: "HomeServer"):
4545 self._clock = hs.get_clock()
4646 self._store = hs.get_datastores().main
47 self._state_storage_controller = hs.get_storage_controllers().state
4748 self._server_name = hs.hostname
4849
4950 async def check_auth_rules_from_context(
178179 this function may return an incorrect result as we are not able to fully
179180 track server membership in a room without full state.
180181 """
181 if not allow_partial_state_rooms and await self._store.is_partial_state_room(
182 room_id
183 ):
184 raise AuthError(
185 403,
186 "Unable to authorise you right now; room is partial-stated here.",
187 errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
188 )
189
190 if not await self.is_host_in_room(room_id, host):
191 raise AuthError(403, "Host not in room.")
182 if await self._store.is_partial_state_room(room_id):
183 if allow_partial_state_rooms:
184 current_hosts = await self._state_storage_controller.get_current_hosts_in_room_or_partial_state_approximation(
185 room_id
186 )
187 if host not in current_hosts:
188 raise AuthError(403, "Host not in room (partial-state approx).")
189 else:
190 raise AuthError(
191 403,
192 "Unable to authorise you right now; room is partial-stated here.",
193 errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE,
194 )
195 else:
196 if not await self.is_host_in_room(room_id, host):
197 raise AuthError(403, "Host not in room.")
192198
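# Hedged summary of the branch added above (outcomes only, no new behaviour):
#   partial-state room, allow_partial_state_rooms=True  -> check `host` against the
#       approximation from get_current_hosts_in_room_or_partial_state_approximation()
#   partial-state room, allow_partial_state_rooms=False -> 403 with
#       Codes.UNABLE_DUE_TO_PARTIAL_STATE
#   fully-stated room                                   -> the usual is_host_in_room() check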
193199 async def check_restricted_join_rules(
194200 self,
377377 # positives from users having been erased.
378378 filtered_extremities = await filter_events_for_server(
379379 self._storage_controllers,
380 self.server_name,
380381 self.server_name,
381382 events_to_check,
382383 redact=False,
12301231 async def on_backfill_request(
12311232 self, origin: str, room_id: str, pdu_list: List[str], limit: int
12321233 ) -> List[EventBase]:
1233 await self._event_auth_handler.assert_host_in_room(room_id, origin)
1234 # We allow partially joined rooms since in this case we are filtering out
1235 # non-local events in `filter_events_for_server`.
1236 await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
12341237
12351238 # Synapse asks for 100 events per backfill request. Do not allow more.
12361239 limit = min(limit, 100)
12511254 )
12521255
12531256 events = await filter_events_for_server(
1254 self._storage_controllers, origin, events
1257 self._storage_controllers, origin, self.server_name, events
12551258 )
12561259
12571260 return events
12821285 await self._event_auth_handler.assert_host_in_room(event.room_id, origin)
12831286
12841287 events = await filter_events_for_server(
1285 self._storage_controllers, origin, [event]
1288 self._storage_controllers, origin, self.server_name, [event]
12861289 )
12871290 event = events[0]
12881291 return event
12951298 latest_events: List[str],
12961299 limit: int,
12971300 ) -> List[EventBase]:
1298 await self._event_auth_handler.assert_host_in_room(room_id, origin)
1301 # We allow partially joined rooms since in this case we are filtering out
1302 # non-local events in `filter_events_for_server`.
1303 await self._event_auth_handler.assert_host_in_room(room_id, origin, True)
12991304
13001305 # Only allow up to 20 events to be retrieved per request.
13011306 limit = min(limit, 20)
13081313 )
13091314
13101315 missing_events = await filter_events_for_server(
1311 self._storage_controllers, origin, missing_events
1316 self._storage_controllers, origin, self.server_name, missing_events
13121317 )
13131318
13141319 return missing_events
15951600 Fetch the complexity of a remote room over federation.
15961601
15971602 Args:
1598 remote_room_hosts (list[str]): The remote servers to ask.
1599 room_id (str): The room ID to ask about.
1603 remote_room_hosts: The remote servers to ask.
1604 room_id: The room ID to ask about.
16001605
16011606 Returns:
16021607 Dict contains the complexity
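# A minimal usage sketch for the helper documented above; the method name
# `get_room_complexity` and the "v1" key in the returned dict are assumptions
# for illustration, so treat this as a sketch rather than the actual handler API.
async def room_is_too_complex(handler, remote_room_hosts, room_id, max_complexity: float) -> bool:
    complexity = await handler.get_room_complexity(remote_room_hosts, room_id)
    if complexity is None:
        # No remote server could tell us; err on the side of allowing the join.
        return False
    return complexity["v1"] > max_complexity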
710710 inviter_display_name: The current display name of the
711711 inviter.
712712 inviter_avatar_url: The URL of the inviter's avatar.
713 id_access_token (str): The access token to authenticate to the identity
713 id_access_token: The access token to authenticate to the identity
714714 server with
715715
716716 Returns:
11341134 )
11351135 state_events = await self.store.get_events_as_list(state_event_ids)
11361136 # Create a StateMap[str]
1137 state_map = {(e.type, e.state_key): e.event_id for e in state_events}
1137 current_state_ids = {
1138 (e.type, e.state_key): e.event_id for e in state_events
1139 }
11381140 # Actually strip down and only use the necessary auth events
11391141 auth_event_ids = self._event_auth_handler.compute_auth_events(
11401142 event=temp_event,
1141 current_state_ids=state_map,
1143 current_state_ids=current_state_ids,
11421144 for_verification=False,
11431145 )
11441146
786786 Must include an ``access_token`` field.
787787
788788 Returns:
789 UserInfo: an object representing the user.
789 an object representing the user.
790790 """
791791 logger.debug("Using the OAuth2 access_token to request userinfo")
792792 metadata = await self.load_metadata()
14341434 localpart: Optional[str]
14351435 confirm_localpart: bool
14361436 display_name: Optional[str]
1437 picture: Optional[str] # may be omitted by older `OidcMappingProviders`
14371438 emails: List[str]
14381439
14391440
15191520 @attr.s(slots=True, frozen=True, auto_attribs=True)
15201521 class JinjaOidcMappingConfig:
15211522 subject_claim: str
1523 picture_claim: str
15221524 localpart_template: Optional[Template]
15231525 display_name_template: Optional[Template]
15241526 email_template: Optional[Template]
15381540 @staticmethod
15391541 def parse_config(config: dict) -> JinjaOidcMappingConfig:
15401542 subject_claim = config.get("subject_claim", "sub")
1543 picture_claim = config.get("picture_claim", "picture")
15411544
15421545 def parse_template_config(option_name: str) -> Optional[Template]:
15431546 if option_name not in config:
15711574
15721575 return JinjaOidcMappingConfig(
15731576 subject_claim=subject_claim,
1577 picture_claim=picture_claim,
15741578 localpart_template=localpart_template,
15751579 display_name_template=display_name_template,
15761580 email_template=email_template,
16101614 if email:
16111615 emails.append(email)
16121616
1617 picture = userinfo.get("picture")
1618
16131619 return UserAttributeDict(
16141620 localpart=localpart,
16151621 display_name=display_name,
16161622 emails=emails,
1623 picture=picture,
16171624 confirm_localpart=self._config.confirm_localpart,
16181625 )
16191626
447447
448448 if pagin_config.from_token:
449449 from_token = pagin_config.from_token
450 elif pagin_config.direction == "f":
451 from_token = (
452 await self.hs.get_event_sources().get_start_token_for_pagination(
453 room_id
454 )
455 )
450456 else:
451457 from_token = (
452458 await self.hs.get_event_sources().get_current_token_for_pagination(
200200 """Get the current presence state for multiple users.
201201
202202 Returns:
203 dict: `user_id` -> `UserPresenceState`
203 A mapping of `user_id` -> `UserPresenceState`
204204 """
205205 states = {}
206206 missing = []
477477 return _NullContextManager()
478478
479479 prev_state = await self.current_state_for_user(user_id)
480 if prev_state != PresenceState.BUSY:
480 if prev_state.state != PresenceState.BUSY:
481481 # We set state here but pass ignore_status_msg = True as we don't want to
482482 # cause the status message to be cleared.
483483 # Note that this causes last_active_ts to be incremented which is not
9191 continue
9292
9393 # Check if these receipts apply to a thread.
94 thread_id = None
9594 data = user_values.get("data", {})
9695 thread_id = data.get("thread_id")
9796 # If the thread ID is invalid, consider it missing.
3737 )
3838 from synapse.appservice import ApplicationService
3939 from synapse.config.server import is_threepid_reserved
40 from synapse.handlers.device import DeviceHandler
4041 from synapse.http.servlet import assert_params_in_dict
4142 from synapse.replication.http.login import RegisterDeviceReplicationServlet
4243 from synapse.replication.http.register import (
840841 refresh_token = None
841842 refresh_token_id = None
842843
844 # This can only run on the main process.
845 assert isinstance(self.device_handler, DeviceHandler)
846
843847 registered_device_id = await self.device_handler.check_device_registered(
844848 user_id,
845849 device_id,
1212 # limitations under the License.
1313 import enum
1414 import logging
15 from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple
15 from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, Optional
1616
1717 import attr
1818
1919 from synapse.api.constants import EventTypes, RelationTypes
2020 from synapse.api.errors import SynapseError
2121 from synapse.events import EventBase, relation_from_event
22 from synapse.logging.context import make_deferred_yieldable, run_in_background
2223 from synapse.logging.opentracing import trace
2324 from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent
2425 from synapse.streams.config import PaginationConfig
25 from synapse.types import JsonDict, Requester, StreamToken, UserID
26 from synapse.types import JsonDict, Requester, UserID
27 from synapse.util.async_helpers import gather_results
2628 from synapse.visibility import filter_events_for_client
2729
2830 if TYPE_CHECKING:
170172 )
171173
172174 return return_value
173
174 async def get_relations_for_event(
175 self,
176 event_id: str,
177 event: EventBase,
178 room_id: str,
179 relation_type: str,
180 ignored_users: FrozenSet[str] = frozenset(),
181 ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]:
182 """Get a list of events which relate to an event, ordered by topological ordering.
183
184 Args:
185 event_id: Fetch events that relate to this event ID.
186 event: The matching EventBase to event_id.
187 room_id: The room the event belongs to.
188 relation_type: The type of relation.
189 ignored_users: The users ignored by the requesting user.
190
191 Returns:
192 List of event IDs that match relations requested. The rows are of
193 the form `{"event_id": "..."}`.
194 """
195
196 # Call the underlying storage method, which is cached.
197 related_events, next_token = await self._main_store.get_relations_for_event(
198 event_id, event, room_id, relation_type, direction="f"
199 )
200
201 # Filter out ignored users and convert to the expected format.
202 related_events = [
203 event for event in related_events if event.sender not in ignored_users
204 ]
205
206 return related_events, next_token
207175
208176 async def redact_events_related_to(
209177 self,
258226 e.msg,
259227 )
260228
261 async def get_annotations_for_event(
262 self,
263 event_id: str,
264 room_id: str,
265 limit: int = 5,
266 ignored_users: FrozenSet[str] = frozenset(),
267 ) -> List[JsonDict]:
268 """Get a list of annotations on the event, grouped by event type and
229 async def get_annotations_for_events(
230 self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
231 ) -> Dict[str, List[JsonDict]]:
232 """Get a list of annotations to the given events, grouped by event type and
269233 aggregation key, sorted by count.
270234
271 This is used e.g. to get the what and how many reactions have happend
235 This is used e.g. to get which reactions, and how many of them, have happened
272236 on an event.
273237
274238 Args:
275 event_id: Fetch events that relate to this event ID.
276 room_id: The room the event belongs to.
277 limit: Only fetch the `limit` groups.
239 event_ids: Fetch events that relate to these event IDs.
278240 ignored_users: The users ignored by the requesting user.
279241
280242 Returns:
281 List of groups of annotations that match. Each row is a dict with
282 `type`, `key` and `count` fields.
243 A map of event IDs to a list of groups of annotations that match.
244 Each entry is a dict with `type`, `key` and `count` fields.
283245 """
284246 # Get the base results for all users.
285 full_results = await self._main_store.get_aggregation_groups_for_event(
286 event_id, room_id, limit
287 )
247 full_results = await self._main_store.get_aggregation_groups_for_events(
248 event_ids
249 )
250
251 # Avoid additional logic if there are no ignored users.
252 if not ignored_users:
253 return {
254 event_id: results
255 for event_id, results in full_results.items()
256 if results
257 }
288258
289259 # Then subtract off the results for any ignored users.
290260 ignored_results = await self._main_store.get_aggregation_groups_for_users(
291 event_id, room_id, limit, ignored_users
292 )
293
294 filtered_results = []
295 for result in full_results:
296 key = (result["type"], result["key"])
297 if key in ignored_results:
298 result = result.copy()
299 result["count"] -= ignored_results[key]
300 if result["count"] <= 0:
301 continue
302 filtered_results.append(result)
261 [event_id for event_id, results in full_results.items() if results],
262 ignored_users,
263 )
264
265 filtered_results = {}
266 for event_id, results in full_results.items():
267 # If no annotations, skip.
268 if not results:
269 continue
270
271 # If there are no ignored results for this event, copy verbatim.
272 if event_id not in ignored_results:
273 filtered_results[event_id] = results
274 continue
275
276 # Otherwise, subtract out the ignored results.
277 event_ignored_results = ignored_results[event_id]
278 for result in results:
279 key = (result["type"], result["key"])
280 if key in event_ignored_results:
281 # Ensure to not modify the cache.
282 result = result.copy()
283 result["count"] -= event_ignored_results[key]
284 if result["count"] <= 0:
285 continue
286 filtered_results.setdefault(event_id, []).append(result)
303287
304288 return filtered_results
289
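# Hedged illustration of the mapping described in the docstring above; the
# event ID, reaction keys and counts below are made up:
#
# {
#     "$root_event_id": [
#         {"type": "m.reaction", "key": "👍", "count": 3},
#         {"type": "m.reaction", "key": "🎉", "count": 1},
#     ],
# }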
290 async def get_references_for_events(
291 self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset()
292 ) -> Dict[str, List[_RelatedEvent]]:
293 """Get a list of references to the given events.
294
295 Args:
296 event_ids: Fetch events that relate to these event IDs.
297 ignored_users: The users ignored by the requesting user.
298
299 Returns:
300 A map of event IDs to a list of related events.
301 """
302
303 related_events = await self._main_store.get_references_for_events(event_ids)
304
305 # Avoid additional logic if there are no ignored users.
306 if not ignored_users:
307 return {
308 event_id: results
309 for event_id, results in related_events.items()
310 if results
311 }
312
313 # Filter out ignored users.
314 results = {}
315 for event_id, events in related_events.items():
316 # If no references, skip.
317 if not events:
318 continue
319
320 # Filter ignored users out.
321 events = [event for event in events if event.sender not in ignored_users]
322 # If there are no events left, skip this event.
323 if not events:
324 continue
325
326 results[event_id] = events
327
328 return results
305329
306330 async def _get_threads_for_events(
307331 self,
365389 results = {}
366390
367391 for event_id, summary in summaries.items():
368 if summary:
369 thread_count, latest_thread_event = summary
370
371 # Subtract off the count of any ignored users.
372 for ignored_user in ignored_users:
373 thread_count -= ignored_results.get((event_id, ignored_user), 0)
374
375 # This is gnarly, but if the latest event is from an ignored user,
376 # attempt to find one that isn't from an ignored user.
377 if latest_thread_event.sender in ignored_users:
378 room_id = latest_thread_event.room_id
379
380 # If the root event is not found, something went wrong, do
381 # not include a summary of the thread.
382 event = await self._event_handler.get_event(user, room_id, event_id)
383 if event is None:
384 continue
385
386 potential_events, _ = await self.get_relations_for_event(
387 event_id,
388 event,
389 room_id,
390 RelationTypes.THREAD,
391 ignored_users,
392 # If no thread, skip.
393 if not summary:
394 continue
395
396 thread_count, latest_thread_event = summary
397
398 # Subtract off the count of any ignored users.
399 for ignored_user in ignored_users:
400 thread_count -= ignored_results.get((event_id, ignored_user), 0)
401
402 # This is gnarly, but if the latest event is from an ignored user,
403 # attempt to find one that isn't from an ignored user.
404 if latest_thread_event.sender in ignored_users:
405 room_id = latest_thread_event.room_id
406
407 # If the root event is not found, something went wrong, do
408 # not include a summary of the thread.
409 event = await self._event_handler.get_event(user, room_id, event_id)
410 if event is None:
411 continue
412
413 # Attempt to find another event to use as the latest event.
414 potential_events, _ = await self._main_store.get_relations_for_event(
415 event_id, event, room_id, RelationTypes.THREAD, direction="f"
416 )
417
418 # Filter out ignored users.
419 potential_events = [
420 event
421 for event in potential_events
422 if event.sender not in ignored_users
423 ]
424
425 # If all found events are from ignored users, do not include
426 # a summary of the thread.
427 if not potential_events:
428 continue
429
430 # The *last* event returned is the one that is cared about.
431 event = await self._event_handler.get_event(
432 user, room_id, potential_events[-1].event_id
433 )
434 # It is unexpected that the event will not exist.
435 if event is None:
436 logger.warning(
437 "Unable to fetch latest event in a thread with event ID: %s",
438 potential_events[-1].event_id,
392439 )
393
394 # If all found events are from ignored users, do not include
395 # a summary of the thread.
396 if not potential_events:
397 continue
398
399 # The *last* event returned is the one that is cared about.
400 event = await self._event_handler.get_event(
401 user, room_id, potential_events[-1].event_id
402 )
403 # It is unexpected that the event will not exist.
404 if event is None:
405 logger.warning(
406 "Unable to fetch latest event in a thread with event ID: %s",
407 potential_events[-1].event_id,
408 )
409 continue
410 latest_thread_event = event
411
412 results[event_id] = _ThreadAggregation(
413 latest_event=latest_thread_event,
414 count=thread_count,
415 # If there's a thread summary it must also exist in the
416 # participated dictionary.
417 current_user_participated=events_by_id[event_id].sender == user_id
418 or participated[event_id],
419 )
440 continue
441 latest_thread_event = event
442
443 results[event_id] = _ThreadAggregation(
444 latest_event=latest_thread_event,
445 count=thread_count,
446 # If there's a thread summary it must also exist in the
447 # participated dictionary.
448 current_user_participated=events_by_id[event_id].sender == user_id
449 or participated[event_id],
450 )
420451
421452 return results
422453
495526 # (as that is what makes it part of the thread).
496527 relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD
497528
498 # Fetch other relations per event.
499 for event in events_by_id.values():
500 # Fetch any annotations (ie, reactions) to bundle with this event.
501 annotations = await self.get_annotations_for_event(
502 event.event_id, event.room_id, ignored_users=ignored_users
503 )
504 if annotations:
505 results.setdefault(
506 event.event_id, BundledAggregations()
507 ).annotations = {"chunk": annotations}
508
509 # Fetch any references to bundle with this event.
510 references, next_token = await self.get_relations_for_event(
511 event.event_id,
512 event,
513 event.room_id,
514 RelationTypes.REFERENCE,
515 ignored_users=ignored_users,
516 )
517 if references:
518 aggregations = results.setdefault(event.event_id, BundledAggregations())
519 aggregations.references = {
520 "chunk": [{"event_id": ev.event_id} for ev in references]
521 }
522
523 if next_token:
524 aggregations.references["next_batch"] = await next_token.to_string(
525 self._main_store
526 )
527
528 # Fetch any edits (but not for redacted events).
529 #
530 # Note that there is no use in limiting edits by ignored users since the
531 # parent event should be ignored in the first place if the user is ignored.
532 edits = await self._main_store.get_applicable_edits(
533 [
534 event_id
535 for event_id, event in events_by_id.items()
536 if not event.internal_metadata.is_redacted()
537 ]
538 )
539 for event_id, edit in edits.items():
540 results.setdefault(event_id, BundledAggregations()).replace = edit
529 async def _fetch_annotations() -> None:
530 """Fetch any annotations (ie, reactions) to bundle with this event."""
531 annotations_by_event_id = await self.get_annotations_for_events(
532 events_by_id.keys(), ignored_users=ignored_users
533 )
534 for event_id, annotations in annotations_by_event_id.items():
535 if annotations:
536 results.setdefault(event_id, BundledAggregations()).annotations = {
537 "chunk": annotations
538 }
539
540 async def _fetch_references() -> None:
541 """Fetch any references to bundle with this event."""
542 references_by_event_id = await self.get_references_for_events(
543 events_by_id.keys(), ignored_users=ignored_users
544 )
545 for event_id, references in references_by_event_id.items():
546 if references:
547 results.setdefault(event_id, BundledAggregations()).references = {
548 "chunk": [{"event_id": ev.event_id} for ev in references]
549 }
550
551 async def _fetch_edits() -> None:
552 """
553 Fetch any edits (but not for redacted events).
554
555 Note that there is no use in limiting edits by ignored users since the
556 parent event should be ignored in the first place if the user is ignored.
557 """
558 edits = await self._main_store.get_applicable_edits(
559 [
560 event_id
561 for event_id, event in events_by_id.items()
562 if not event.internal_metadata.is_redacted()
563 ]
564 )
565 for event_id, edit in edits.items():
566 results.setdefault(event_id, BundledAggregations()).replace = edit
567
568 # Parallelize the calls for annotations, references, and edits since they
569 # are unrelated.
570 await make_deferred_yieldable(
571 gather_results(
572 (
573 run_in_background(_fetch_annotations),
574 run_in_background(_fetch_references),
575 run_in_background(_fetch_edits),
576 )
577 )
578 )
541579
542580 return results
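# Hedged note on the pattern above: the three fetches are independent, so each
# is kicked off with `run_in_background` and the resulting deferreds are
# combined with `gather_results`; wrapping the combined deferred in
# `make_deferred_yieldable` makes it safe to `await` under Synapse's
# logcontext rules.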
543581
570608 room_id, requester, allow_departed_users=True
571609 )
572610
573 # Note that ignored users are not passed into get_relations_for_event
611 # Note that ignored users are not passed into get_threads
574612 # below. Ignored users are handled in filter_events_for_client (and by
575613 # not passing them in here we should get a better cache hit rate).
576614 thread_roots, next_batch = await self._main_store.get_threads(
440440 client_redirect_url: where the client wants to redirect to
441441
442442 Returns:
443 dict: A dict containing new user attributes. Possible keys:
443 A dict containing new user attributes. Possible keys:
444444 * mxid_localpart (str): Required. The localpart of the user's mxid
445445 * displayname (str): The displayname of the user
446446 * emails (list[str]): Any emails for the user
482482 Args:
483483 config: A dictionary containing configuration options for this provider
484484 Returns:
485 SamlConfig: A custom config object for this module
485 A custom config object for this module
486486 """
487487 # Parse config options and use defaults where necessary
488488 mxid_source_attribute = config.get("mxid_source_attribute", "uid")
1414 from typing import TYPE_CHECKING, Optional
1515
1616 from synapse.api.errors import Codes, StoreError, SynapseError
17 from synapse.handlers.device import DeviceHandler
1718 from synapse.types import Requester
1819
1920 if TYPE_CHECKING:
2829 def __init__(self, hs: "HomeServer"):
2930 self.store = hs.get_datastores().main
3031 self._auth_handler = hs.get_auth_handler()
31 self._device_handler = hs.get_device_handler()
32 # This can only be instantiated on the main process.
33 device_handler = hs.get_device_handler()
34 assert isinstance(device_handler, DeviceHandler)
35 self._device_handler = device_handler
3236
3337 async def set_password(
3438 self,
1111 # See the License for the specific language governing permissions and
1212 # limitations under the License.
1313 import abc
14 import hashlib
15 import io
1416 import logging
1517 from typing import (
1618 TYPE_CHECKING,
3638 from synapse.api.constants import LoginType
3739 from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError
3840 from synapse.config.sso import SsoAttributeRequirement
41 from synapse.handlers.device import DeviceHandler
3942 from synapse.handlers.register import init_counters_for_auth_provider
4043 from synapse.handlers.ui_auth import UIAuthSessionDataConstants
4144 from synapse.http import get_request_user_agent
136139 localpart: Optional[str]
137140 confirm_localpart: bool = False
138141 display_name: Optional[str] = None
142 picture: Optional[str] = None
139143 emails: Collection[str] = attr.Factory(list)
140144
141145
194198 self._error_template = hs.config.sso.sso_error_template
195199 self._bad_user_template = hs.config.sso.sso_auth_bad_user_template
196200 self._profile_handler = hs.get_profile_handler()
201 self._media_repo = (
202 hs.get_media_repository() if hs.config.media.can_load_media_repo else None
203 )
204 self._http_client = hs.get_proxied_blacklisted_http_client()
197205
198206 # The following template is shown after a successful user interactive
199207 # authentication session. It tells the user they can close the window.
493501 await self._profile_handler.set_displayname(
494502 user_id_obj, requester, attributes.display_name, True
495503 )
504 if attributes.picture:
505 await self.set_avatar(user_id, attributes.picture)
496506
497507 await self._auth_handler.complete_sso_login(
498508 user_id,
701711 await self._store.record_user_external_id(
702712 auth_provider_id, remote_user_id, registered_user_id
703713 )
714
715 # Set avatar, if available
716 if attributes.picture:
717 await self.set_avatar(registered_user_id, attributes.picture)
718
704719 return registered_user_id
720
721 async def set_avatar(self, user_id: str, picture_https_url: str) -> bool:
722 """Set avatar of the user.
723
724 This downloads the image file from the URL provided, stores that in
725 the media repository and then sets the avatar on the user's profile.
726
727 It can detect if the same image is being saved again and bails early by storing
728 the hash of the file in the `upload_name` of the avatar image.
729
730 Currently, it only supports server configurations which run the media repository
731 within the same process.
732
733 It fails silently (raising an exception internally, catching it, and logging
734 a warning) if:
735 * it is unable to fetch the image itself (non 200 status code) or
736 * the image supplied is bigger than max allowed size or
737 * the image type is not one of the allowed image types.
738
739 Args:
740 user_id: matrix user ID in the form @localpart:domain as a string.
741
742 picture_https_url: HTTPS url for the picture image file.
743
744 Returns: `True` if the user's avatar has been successfully set to the image at
745 `picture_https_url`.
746 """
747 if self._media_repo is None:
748 logger.info(
749 "failed to set user avatar because out-of-process media repositories "
750 "are not supported yet "
751 )
752 return False
753
754 try:
755 uid = UserID.from_string(user_id)
756
757 def is_allowed_mime_type(content_type: str) -> bool:
758 if (
759 self._profile_handler.allowed_avatar_mimetypes
760 and content_type
761 not in self._profile_handler.allowed_avatar_mimetypes
762 ):
763 return False
764 return True
765
766 # download picture, enforcing size limit & mime type check
767 picture = io.BytesIO()
768
769 content_length, headers, uri, code = await self._http_client.get_file(
770 url=picture_https_url,
771 output_stream=picture,
772 max_size=self._profile_handler.max_avatar_size,
773 is_allowed_content_type=is_allowed_mime_type,
774 )
775
776 if code != 200:
777 raise Exception(
778 "GET request to download sso avatar image returned {}".format(code)
779 )
780
781 # upload name includes hash of the image file's content so that we can
782 # easily check if it requires an update or not, the next time user logs in
783 upload_name = "sso_avatar_" + hashlib.sha256(picture.read()).hexdigest()
784
785 # bail if user already has the same avatar
786 profile = await self._profile_handler.get_profile(user_id)
787 if profile["avatar_url"] is not None:
788 server_name = profile["avatar_url"].split("/")[-2]
789 media_id = profile["avatar_url"].split("/")[-1]
790 if server_name == self._server_name:
791 media = await self._media_repo.store.get_local_media(media_id)
792 if media is not None and upload_name == media["upload_name"]:
793 logger.info("skipping saving the user avatar")
794 return True
795
796 # store it in media repository
797 avatar_mxc_url = await self._media_repo.create_content(
798 media_type=headers[b"Content-Type"][0].decode("utf-8"),
799 upload_name=upload_name,
800 content=picture,
801 content_length=content_length,
802 auth_user=uid,
803 )
804
805 # save it as user avatar
806 await self._profile_handler.set_avatar_url(
807 uid,
808 create_requester(uid),
809 str(avatar_mxc_url),
810 )
811
812 logger.info("successfully saved the user avatar")
813 return True
814 except Exception:
815 logger.warning("failed to save the user avatar")
816 return False
705817
706818 async def complete_sso_ui_auth_request(
707819 self,
10341146 ) -> None:
10351147 """Revoke any devices and in-flight logins tied to a provider session.
10361148
1149 Can only be called from the main process.
1150
10371151 Args:
10381152 auth_provider_id: A unique identifier for this SSO provider, e.g.
10391153 "oidc" or "saml".
10411155 expected_user_id: The user we're expecting to logout. If set, it will ignore
10421156 sessions belonging to other users and log an error.
10431157 """
1158
1159 # It is expected that this is the main process.
1160 assert isinstance(
1161 self._device_handler, DeviceHandler
1162 ), "revoking SSO sessions can only be called on the main process"
1163
10441164 # Invalidate any running user-mapping sessions
10451165 to_delete = []
10461166 for session_id, session in self._username_mapping_sessions.items():
14251425
14261426 logger.debug("Fetching OTK data")
14271427 device_id = sync_config.device_id
1428 one_time_key_counts: JsonDict = {}
1428 one_time_keys_count: JsonDict = {}
14291429 unused_fallback_key_types: List[str] = []
14301430 if device_id:
14311431 # TODO: We should have a way to let clients differentiate between the states of:
14321432 # * no change in OTK count since the provided since token
14331433 # * the server has zero OTKs left for this device
14341434 # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298
1435 one_time_key_counts = await self.store.count_e2e_one_time_keys(
1435 one_time_keys_count = await self.store.count_e2e_one_time_keys(
14361436 user_id, device_id
14371437 )
14381438 unused_fallback_key_types = (
14621462 archived=sync_result_builder.archived,
14631463 to_device=sync_result_builder.to_device,
14641464 device_lists=device_lists,
1465 device_one_time_keys_count=one_time_key_counts,
1465 device_one_time_keys_count=one_time_keys_count,
14661466 device_unused_fallback_key_types=unused_fallback_key_types,
14671467 next_batch=sync_result_builder.now_token,
14681468 )
4444
4545 Args:
4646 hs: homeserver
47 handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred):
48 function to be called to handle the request.
47 handler: function to be called to handle the request.
4948 """
5049 super().__init__()
5150 self._handler = handler
154154 a file for a file upload). Or None if the request is to have
155155 no body.
156156 Returns:
157 Deferred[twisted.web.iweb.IResponse]:
158 fires when the header of the response has been received (regardless of the
159 response status code). Fails if there is any problem which prevents that
160 response from being received (including problems that prevent the request
161 from being sent).
157 A deferred which fires when the header of the response has been received
158 (regardless of the response status code). Fails if there is any problem
159 which prevents that response from being received (including problems that
160 prevent the request from being sent).
162161 """
163162 # We use urlparse as that will set `port` to None if there is no
164163 # explicit port.
950950
951951 args: query params
952952 Returns:
953 dict|list: Succeeds when we get a 2xx HTTP response. The
954 result will be the decoded JSON body.
953 Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body.
955954
956955 Raises:
957956 HttpResponseException: If we get an HTTP response code >= 300
3333 )
3434 from twisted.web.error import SchemeNotSupported
3535 from twisted.web.http_headers import Headers
36 from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS
36 from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse
3737
3838 from synapse.http import redact_uri
3939 from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials
133133 uri: bytes,
134134 headers: Optional[Headers] = None,
135135 bodyProducer: Optional[IBodyProducer] = None,
136 ) -> defer.Deferred:
136 ) -> "defer.Deferred[IResponse]":
137137 """
138138 Issue a request to the server indicated by the given uri.
139139
156156 a file upload). Or, None if the request is to have no body.
157157
158158 Returns:
159 Deferred[IResponse]: completes when the header of the response has
160 been received (regardless of the response status code).
161
162 Can fail with:
163 SchemeNotSupported: if the uri is not http or https
164
165 twisted.internet.error.TimeoutError if the server we are connecting
166 to (proxy or destination) does not accept a connection before
167 connectTimeout.
168
169 ... other things too.
159 A deferred which completes when the header of the response has
160 been received (regardless of the response status code).
161
162 Can fail with:
163 SchemeNotSupported: if the uri is not http or https
164
165 twisted.internet.error.TimeoutError if the server we are connecting
166 to (proxy or destination) does not accept a connection before
167 connectTimeout.
168
169 ... other things too.
170170 """
171171 uri = uri.strip()
172172 if not _VALID_URI.match(uri):
266266 request. The first argument will be the request object and
267267 subsequent arguments will be any matched groups from the regex.
268268 This should return either tuple of (code, response), or None.
269 servlet_classname (str): The name of the handler to be used in prometheus
269 servlet_classname: The name of the handler to be used in prometheus
270270 and opentracing logs.
271271 """
272272
399399 be sure to call finished_processing.
400400
401401 Args:
402 servlet_name (str): the name of the servlet which will be
402 servlet_name: the name of the servlet which will be
403403 processing this request. This is used in the metrics.
404404
405405 It is possible to update this afterwards by updating
116116 """Create a new ContextResourceUsage
117117
118118 Args:
119 copy_from (ContextResourceUsage|None): if not None, an object to
120 copy stats from
119 copy_from: if not None, an object to copy stats from
121120 """
122121 if copy_from is None:
123122 self.reset()
161160 """Add another ContextResourceUsage's stats to this one's.
162161
163162 Args:
164 other (ContextResourceUsage): the other resource usage object
163 other: the other resource usage object
165164 """
166165 self.ru_utime += other.ru_utime
167166 self.ru_stime += other.ru_stime
341340 called directly.
342341
343342 Returns:
344 LoggingContext: the current logging context
343 The current logging context
345344 """
346345 warnings.warn(
347346 "synapse.logging.context.LoggingContext.current_context() is deprecated "
361360 called directly.
362361
363362 Args:
364 context(LoggingContext): The context to activate.
363 context: The context to activate.
364
365365 Returns:
366366 The context that was previously active
367367 """
473473 """Get resources used by this logcontext so far.
474474
475475 Returns:
476 ContextResourceUsage: a *copy* of the object tracking resource
477 usage so far
476 A *copy* of the object tracking resource usage so far
478477 """
479478 # we always return a copy, for consistency
480479 res = self._resource_usage.copy()
662661 def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel:
663662 """Set the current logging context in thread local storage
664663 Args:
665 context(LoggingContext): The context to activate.
664 context: The context to activate.
665
666666 Returns:
667667 The context that was previously active
668668 """
699699 suffix: suffix to add to the parent context's 'name'.
700700
701701 Returns:
702 LoggingContext: new logging context.
702 A new logging context.
703703 """
704704 curr_context = current_context()
705705 if not curr_context:
897897 on it.
898898
899899 Args:
900 reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
901 the Deferred will be invoked, and whose threadpool we should use for the
902 function.
900 reactor: The reactor in whose main thread the Deferred will be invoked,
901 and whose threadpool we should use for the function.
903902
904903 Normally this will be hs.get_reactor().
905904
906 f (callable): The function to call.
905 f: The function to call.
907906
908907 args: positional arguments to pass to f.
909908
910909 kwargs: keyword arguments to pass to f.
911910
912911 Returns:
913 Deferred: A Deferred which fires a callback with the result of `f`, or an
912 A Deferred which fires a callback with the result of `f`, or an
914913 errback if `f` throws an exception.
915914 """
916915 return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs)
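# A minimal usage sketch for `defer_to_thread` as documented above; `hs` and
# the blocking helper are assumptions for illustration only.
import hashlib

def _blocking_file_hash(path: str) -> str:
    # Runs on the reactor's threadpool, so blocking I/O is acceptable here.
    with open(path, "rb") as f:
        return hashlib.sha256(f.read()).hexdigest()

async def hash_file_without_blocking_the_reactor(hs, path: str) -> str:
    # Awaiting the returned Deferred resumes with the result of the function.
    return await defer_to_thread(hs.get_reactor(), _blocking_file_hash, path)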
938937 on it.
939938
940939 Args:
941 reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread
942 the Deferred will be invoked. Normally this will be hs.get_reactor().
943
944 threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for
945 running `f`. Normally this will be hs.get_reactor().getThreadPool().
946
947 f (callable): The function to call.
940 reactor: The reactor in whose main thread the Deferred will be invoked.
941 Normally this will be hs.get_reactor().
942
943 threadpool: The threadpool to use for running `f`. Normally this will be
944 hs.get_reactor().getThreadPool().
945
946 f: The function to call.
948947
949948 args: positional arguments to pass to f.
950949
951950 kwargs: keyword arguments to pass to f.
952951
953952 Returns:
954 Deferred: A Deferred which fires a callback with the result of `f`, or an
953 A Deferred which fires a callback with the result of `f`, or an
955954 errback if `f` throws an exception.
956955 """
957956 curr_context = current_context()
720720 destination: address of entity receiving the span context. Must be given unless
721721 check_destination is False. The context will only be injected if the
722722 destination matches the opentracing whitelist
723 check_destination (bool): If false, destination will be ignored and the context
723 check_destination: If false, destination will be ignored and the context
724724 will always be injected.
725725
726726 Note:
779779 destination: the name of the remote server.
780780
781781 Returns:
782 dict: the active span's context if opentracing is enabled, otherwise empty.
782 the active span's context if opentracing is enabled, otherwise empty.
783783 """
784784
785785 if destination and not whitelisted_homeserver(destination):
4646 # This module is imported for its side effects; flake8 needn't warn that it's unused.
4747 import synapse.metrics._reactor_metrics # noqa: F401
4848 from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager
49 from synapse.metrics._legacy_exposition import (
50 MetricsResource,
51 generate_latest,
52 start_http_server,
53 )
49 from synapse.metrics._twisted_exposition import MetricsResource, generate_latest
5450 from synapse.metrics._types import Collector
5551 from synapse.util import SYNAPSE_VERSION
5652
473469 "Collector",
474470 "MetricsResource",
475471 "generate_latest",
476 "start_http_server",
477472 "LaterGauge",
478473 "InFlightGauge",
479474 "GaugeBucketCollector",
synapse/metrics/_legacy_exposition.py (+0, -288)
0 # Copyright 2015-2019 Prometheus Python Client Developers
1 # Copyright 2019 Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This code is based off `prometheus_client/exposition.py` from version 0.7.1.
17
18 Due to the renaming of metrics in prometheus_client 0.4.0, this customised
19 vendoring of the code will emit both the old versions that Synapse dashboards
20 expect, and the newer "best practice" version of the up-to-date official client.
21 """
22 import logging
23 import math
24 import threading
25 from http.server import BaseHTTPRequestHandler, HTTPServer
26 from socketserver import ThreadingMixIn
27 from typing import Any, Dict, List, Type, Union
28 from urllib.parse import parse_qs, urlparse
29
30 from prometheus_client import REGISTRY, CollectorRegistry
31 from prometheus_client.core import Sample
32
33 from twisted.web.resource import Resource
34 from twisted.web.server import Request
35
36 logger = logging.getLogger(__name__)
37 CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
38
39
40 def floatToGoString(d: Union[int, float]) -> str:
41 d = float(d)
42 if d == math.inf:
43 return "+Inf"
44 elif d == -math.inf:
45 return "-Inf"
46 elif math.isnan(d):
47 return "NaN"
48 else:
49 s = repr(d)
50 dot = s.find(".")
51 # Go switches to exponents sooner than Python.
52 # We only need to care about positive values for le/quantile.
53 if d > 0 and dot > 6:
54 mantissa = f"{s[0]}.{s[1:dot]}{s[dot + 1 :]}".rstrip("0.")
55 return f"{mantissa}e+0{dot - 1}"
56 return s
57
58
59 def sample_line(line: Sample, name: str) -> str:
60 if line.labels:
61 labelstr = "{{{0}}}".format(
62 ",".join(
63 [
64 '{}="{}"'.format(
65 k,
66 v.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\""),
67 )
68 for k, v in sorted(line.labels.items())
69 ]
70 )
71 )
72 else:
73 labelstr = ""
74 timestamp = ""
75 if line.timestamp is not None:
76 # Convert to milliseconds.
77 timestamp = f" {int(float(line.timestamp) * 1000):d}"
78 return "{}{} {}{}\n".format(name, labelstr, floatToGoString(line.value), timestamp)
79
80
81 # Mapping from new metric names to legacy metric names.
82 # We translate these back to their old names when exposing them through our
83 # legacy vendored exporter.
84 # Only this legacy exposition module applies these name changes.
85 LEGACY_METRIC_NAMES = {
86 "synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
87 "synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
88 "synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
89 "synapse_util_caches_cache": "synapse_util_caches_cache:total",
90 "synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
91 "synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
92 "synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
93 "synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
94 "synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
95 "synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
96 "synapse_admin_mau_current": "synapse_admin_mau:current",
97 "synapse_admin_mau_max": "synapse_admin_mau:max",
98 "synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
99 }
100
101
102 def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
103 """
104 Generate metrics in legacy format. Modern metrics are generated directly
105 by prometheus-client.
106 """
107
108 output = []
109
110 for metric in registry.collect():
111 if not metric.samples:
112 # No samples, don't bother.
113 continue
114
115 # Translate to legacy metric name if it has one.
116 mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name)
117 mnewname = metric.name
118 mtype = metric.type
119
120 # OpenMetrics -> Prometheus
121 if mtype == "counter":
122 mnewname = mnewname + "_total"
123 elif mtype == "info":
124 mtype = "gauge"
125 mnewname = mnewname + "_info"
126 elif mtype == "stateset":
127 mtype = "gauge"
128 elif mtype == "gaugehistogram":
129 mtype = "histogram"
130 elif mtype == "unknown":
131 mtype = "untyped"
132
133 # Output in the old format for compatibility.
134 if emit_help:
135 output.append(
136 "# HELP {} {}\n".format(
137 mname,
138 metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
139 )
140 )
141 output.append(f"# TYPE {mname} {mtype}\n")
142
143 om_samples: Dict[str, List[str]] = {}
144 for s in metric.samples:
145 for suffix in ["_created", "_gsum", "_gcount"]:
146 if s.name == mname + suffix:
147 # OpenMetrics specific sample, put in a gauge at the end.
148 # (these come from gaugehistograms which don't get renamed,
149 # so no need to faff with mnewname)
150 om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
151 break
152 else:
153 newname = s.name.replace(mnewname, mname)
154 if ":" in newname and newname.endswith("_total"):
155 newname = newname[: -len("_total")]
156 output.append(sample_line(s, newname))
157
158 for suffix, lines in sorted(om_samples.items()):
159 if emit_help:
160 output.append(
161 "# HELP {}{} {}\n".format(
162 mname,
163 suffix,
164 metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
165 )
166 )
167 output.append(f"# TYPE {mname}{suffix} gauge\n")
168 output.extend(lines)
169
170 # Get rid of the weird colon things while we're at it
171 if mtype == "counter":
172 mnewname = mnewname.replace(":total", "")
173 mnewname = mnewname.replace(":", "_")
174
175 if mname == mnewname:
176 continue
177
178 # Also output in the new format, if it's different.
179 if emit_help:
180 output.append(
181 "# HELP {} {}\n".format(
182 mnewname,
183 metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
184 )
185 )
186 output.append(f"# TYPE {mnewname} {mtype}\n")
187
188 for s in metric.samples:
189 # Get rid of the OpenMetrics specific samples (we should already have
190 # dealt with them above anyway.)
191 for suffix in ["_created", "_gsum", "_gcount"]:
192 if s.name == mname + suffix:
193 break
194 else:
195 sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name)
196 output.append(
197 sample_line(s, sample_name.replace(":total", "").replace(":", "_"))
198 )
199
200 return "".join(output).encode("utf-8")
201
202
203 class MetricsHandler(BaseHTTPRequestHandler):
204 """HTTP handler that gives metrics from ``REGISTRY``."""
205
206 registry = REGISTRY
207
208 def do_GET(self) -> None:
209 registry = self.registry
210 params = parse_qs(urlparse(self.path).query)
211
212 if "help" in params:
213 emit_help = True
214 else:
215 emit_help = False
216
217 try:
218 output = generate_latest(registry, emit_help=emit_help)
219 except Exception:
220 self.send_error(500, "error generating metric output")
221 raise
222 try:
223 self.send_response(200)
224 self.send_header("Content-Type", CONTENT_TYPE_LATEST)
225 self.send_header("Content-Length", str(len(output)))
226 self.end_headers()
227 self.wfile.write(output)
228 except BrokenPipeError as e:
229 logger.warning(
230 "BrokenPipeError when serving metrics (%s). Did Prometheus restart?", e
231 )
232
233 def log_message(self, format: str, *args: Any) -> None:
234 """Log nothing."""
235
236 @classmethod
237 def factory(cls, registry: CollectorRegistry) -> Type:
238 """Returns a dynamic MetricsHandler class tied
239 to the passed registry.
240 """
241 # This implementation relies on MetricsHandler.registry
242 # (defined above and defaulted to REGISTRY).
243
244 # As we have unicode_literals, we need to create a str()
245 # object for type().
246 cls_name = str(cls.__name__)
247 MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
248 return MyMetricsHandler
249
250
251 class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer):
252 """Thread per request HTTP server."""
253
254 # Make worker threads "fire and forget". Beginning with Python 3.7 this
255 # prevents a memory leak because ``ThreadingMixIn`` starts to gather all
256 # non-daemon threads in a list in order to join on them at server close.
257 # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the
258 # same as Python 3.7's ``ThreadingHTTPServer``.
259 daemon_threads = True
260
261
262 def start_http_server(
263 port: int, addr: str = "", registry: CollectorRegistry = REGISTRY
264 ) -> None:
265 """Starts an HTTP server for prometheus metrics as a daemon thread"""
266 CustomMetricsHandler = MetricsHandler.factory(registry)
267 httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler)
268 t = threading.Thread(target=httpd.serve_forever)
269 t.daemon = True
270 t.start()
271
272
273 class MetricsResource(Resource):
274 """
275 Twisted ``Resource`` that serves prometheus metrics.
276 """
277
278 isLeaf = True
279
280 def __init__(self, registry: CollectorRegistry = REGISTRY):
281 self.registry = registry
282
283 def render_GET(self, request: Request) -> bytes:
284 request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
285 response = generate_latest(self.registry)
286 request.setHeader(b"Content-Length", str(len(response)))
287 return response
0 # Copyright 2015-2019 Prometheus Python Client Developers
1 # Copyright 2019 Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from prometheus_client import REGISTRY, CollectorRegistry, generate_latest
16
17 from twisted.web.resource import Resource
18 from twisted.web.server import Request
19
20 CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8"
21
22
23 class MetricsResource(Resource):
24 """
25 Twisted ``Resource`` that serves prometheus metrics.
26 """
27
28 isLeaf = True
29
30 def __init__(self, registry: CollectorRegistry = REGISTRY):
31 self.registry = registry
32
33 def render_GET(self, request: Request) -> bytes:
34 request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
35 response = generate_latest(self.registry)
36 request.setHeader(b"Content-Length", str(len(response)))
37 return response
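# A minimal, self-contained sketch (not part of Synapse) showing how the
# MetricsResource above can be served from a Twisted reactor; the port and
# path here are arbitrary choices for illustration.
from prometheus_client import REGISTRY

from twisted.internet import reactor
from twisted.web.resource import Resource
from twisted.web.server import Site

root = Resource()
root.putChild(b"metrics", MetricsResource(registry=REGISTRY))
reactor.listenTCP(9090, Site(root))  # scrape at http://localhost:9090/metrics
reactor.run()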
5353
5454 async def setup(self) -> None:
5555 """Keep the gauges for common usage metrics up to date."""
56 await self._update_gauges()
56 run_as_background_process(
57 desc="common_usage_metrics_update_gauges", func=self._update_gauges
58 )
5759 self._clock.looping_call(
5860 run_as_background_process,
5961 5 * 60 * 1000,
8585 ON_LOGGED_OUT_CALLBACK,
8686 AuthHandler,
8787 )
88 from synapse.handlers.device import DeviceHandler
8889 from synapse.handlers.push_rules import RuleSpec, check_actions
8990 from synapse.http.client import SimpleHttpClient
9091 from synapse.http.server import (
206207 self._registration_handler = hs.get_registration_handler()
207208 self._send_email_handler = hs.get_send_email_handler()
208209 self._push_rules_handler = hs.get_push_rules_handler()
210 self._device_handler = hs.get_device_handler()
209211 self.custom_template_dir = hs.config.server.custom_template_directory
210212
211213 try:
783785 ) -> Generator["defer.Deferred[Any]", Any, None]:
784786 """Invalidate an access token for a user
785787
788 Can only be called from the main process.
789
786790 Added in Synapse v0.25.0.
787791
788792 Args:
789 access_token(str): access token
793 access_token: access token
790794
791795 Returns:
792796 twisted.internet.defer.Deferred - resolves once the access token
795799 Raises:
796800 synapse.api.errors.AuthError: the access token is invalid
797801 """
802 assert isinstance(
803 self._device_handler, DeviceHandler
804 ), "invalidate_access_token can only be called on the main process"
805
798806 # see if the access token corresponds to a device
799807 user_info = yield defer.ensureDeferred(
800808 self._auth.get_user_by_access_token(access_token)
804812 if device_id:
805813 # delete the device, which will also delete its access tokens
806814 yield defer.ensureDeferred(
807 self._hs.get_device_handler().delete_devices(user_id, [device_id])
815 self._device_handler.delete_devices(user_id, [device_id])
808816 )
809817 else:
810818 # no associated device. Just delete the access token.
831839 **kwargs: named args to be passed to func
832840
833841 Returns:
834 Deferred[object]: result of func
842 Result of func
835843 """
836844 # type-ignore: See https://github.com/python/mypy/issues/8862
837845 return defer.ensureDeferred(
923931 to represent 'any') of the room state to acquire.
924932
925933 Returns:
926 twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
927 The filtered state events in the room.
934 The filtered state events in the room.
928935 """
929936 state_ids = yield defer.ensureDeferred(
930937 self._storage_controllers.state.get_current_state_ids(
2828 from prometheus_client import Counter
2929
3030 from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes
31 from synapse.api.room_versions import PushRuleRoomFlag, RoomVersion
3132 from synapse.event_auth import auth_types_for_event, get_user_power_level
3233 from synapse.events import EventBase, relation_from_event
3334 from synapse.events.snapshot import EventContext
337338 for user_id, level in notification_levels.items():
338339 notification_levels[user_id] = int(level)
339340
341 room_version_features = event.room_version.msc3931_push_features
342 if not room_version_features:
343 room_version_features = []
344
340345 evaluator = PushRuleEvaluator(
341 _flatten_dict(event),
346 _flatten_dict(event, room_version=event.room_version),
342347 room_member_count,
343348 sender_power_level,
344349 notification_levels,
345350 related_events,
346351 self._related_event_match_enabled,
352 room_version_features,
353 self.hs.config.experimental.msc1767_enabled, # MSC3931 flag
347354 )
348355
349356 users = rules_by_user.keys()
419426
420427 def _flatten_dict(
421428 d: Union[EventBase, Mapping[str, Any]],
429 room_version: Optional[RoomVersion] = None,
422430 prefix: Optional[List[str]] = None,
423431 result: Optional[Dict[str, str]] = None,
424432 ) -> Dict[str, str]:
430438 if isinstance(value, str):
431439 result[".".join(prefix + [key])] = value.lower()
432440 elif isinstance(value, Mapping):
441 # do not set `room_version` due to recursion considerations below
433442 _flatten_dict(value, prefix=(prefix + [key]), result=result)
434443
444 # `room_version` should only ever be set when looking at the top level of an event
445 if (
446 room_version is not None
447 and PushRuleRoomFlag.EXTENSIBLE_EVENTS in room_version.msc3931_push_features
448 and isinstance(d, EventBase)
449 ):
450 # Room supports extensible events: replace `content.body` with the plain text
451 # representation from `m.markup`, as per MSC1767.
452 markup = d.get("content").get("m.markup")
453 if room_version.identifier.startswith("org.matrix.msc1767."):
454 markup = d.get("content").get("org.matrix.msc1767.markup")
455 if markup is not None and isinstance(markup, list):
456 text = ""
457 for rep in markup:
458 if not isinstance(rep, dict):
459 # invalid markup - skip all processing
460 break
461 if rep.get("mimetype", "text/plain") == "text/plain":
462 rep_text = rep.get("body")
463 if rep_text is not None and isinstance(rep_text, str):
464 text = rep_text.lower()
465 break
466 result["content.body"] = text
467
435468 return result
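The comments above describe the flattening rules used by the push-rule evaluator: string values are lower-cased and stored under their dotted key path, nested mappings are recursed into, and only at the top level of an event may `content.body` be swapped for the plain-text `m.markup` representation in extensible-events room versions. A self-contained sketch of the flattening step alone, on a hypothetical event payload (a real call receives an `EventBase` and its `RoomVersion`):

from typing import Any, Dict, List, Mapping, Optional

def flatten_event_sketch(
    d: Mapping[str, Any],
    prefix: Optional[List[str]] = None,
    result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
    # Lower-case string values under their dotted path; recurse into mappings.
    if prefix is None:
        prefix = []
    if result is None:
        result = {}
    for key, value in d.items():
        if isinstance(value, str):
            result[".".join(prefix + [key])] = value.lower()
        elif isinstance(value, Mapping):
            flatten_event_sketch(value, prefix=prefix + [key], result=result)
    return result

flat = flatten_event_sketch(
    {"type": "m.room.message", "content": {"msgtype": "m.text", "body": "Hello World"}}
)
print(flat)
# {'type': 'm.room.message', 'content.msgtype': 'm.text', 'content.body': 'hello world'}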
152152 argument list.
153153
154154 Returns:
155 dict: If POST/PUT request then dictionary must be JSON serialisable,
155 If POST/PUT request then dictionary must be JSON serialisable,
156156 otherwise must be appropriate for adding as query args.
157157 """
158158 return {}
1212 # limitations under the License.
1313
1414 import logging
15 from typing import TYPE_CHECKING, Tuple
15 from typing import TYPE_CHECKING, Optional, Tuple
1616
1717 from twisted.web.server import Request
1818
1919 from synapse.http.server import HttpServer
20 from synapse.http.servlet import parse_json_object_from_request
2021 from synapse.replication.http._base import ReplicationEndpoint
2122 from synapse.types import JsonDict
2223
6162 def __init__(self, hs: "HomeServer"):
6263 super().__init__(hs)
6364
64 self.device_list_updater = hs.get_device_handler().device_list_updater
65 from synapse.handlers.device import DeviceHandler
66
67 handler = hs.get_device_handler()
68 assert isinstance(handler, DeviceHandler)
69 self.device_list_updater = handler.device_list_updater
70
6571 self.store = hs.get_datastores().main
6672 self.clock = hs.get_clock()
6773
7177
7278 async def _handle_request( # type: ignore[override]
7379 self, request: Request, user_id: str
74 ) -> Tuple[int, JsonDict]:
80 ) -> Tuple[int, Optional[JsonDict]]:
7581 user_devices = await self.device_list_updater.user_device_resync(user_id)
7682
7783 return 200, user_devices
7884
7985
86 class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint):
87 """Ask master to upload keys for the user and send them out over federation to
88 update other servers.
89
90 For now, only the master is permitted to handle key upload requests;
91 any worker can handle key query requests (since they're read-only).
92
93 Calls e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on
94 the main process to accomplish this.
95
96 Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload
97 Request format (borrowed and expanded from KeyUploadServlet):
98
99 POST /_synapse/replication/upload_keys_for_user
100
101 {
102 "user_id": "<user_id>",
103 "device_id": "<device_id>",
104 "keys": {
105 ....this part can be found in KeyUploadServlet in rest/client/keys.py....
106 }
107 }
108
109 Response is equivalent to `/_matrix/client/v3/keys/upload` found in KeyUploadServlet
110
111 """
112
113 NAME = "upload_keys_for_user"
114 PATH_ARGS = ()
115 CACHE = False
116
117 def __init__(self, hs: "HomeServer"):
118 super().__init__(hs)
119
120 self.e2e_keys_handler = hs.get_e2e_keys_handler()
121 self.store = hs.get_datastores().main
122 self.clock = hs.get_clock()
123
124 @staticmethod
125 async def _serialize_payload( # type: ignore[override]
126 user_id: str, device_id: str, keys: JsonDict
127 ) -> JsonDict:
128
129 return {
130 "user_id": user_id,
131 "device_id": device_id,
132 "keys": keys,
133 }
134
135 async def _handle_request( # type: ignore[override]
136 self, request: Request
137 ) -> Tuple[int, JsonDict]:
138 content = parse_json_object_from_request(request)
139
140 user_id = content["user_id"]
141 device_id = content["device_id"]
142 keys = content["keys"]
143
144 results = await self.e2e_keys_handler.upload_keys_for_user(
145 user_id, device_id, keys
146 )
147
148 return 200, results
149
150
80151 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
81152 ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
153 ReplicationUploadKeysForUserRestServlet(hs).register(http_server)
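The request and response formats documented above are exactly what `make_client` serialises for a worker. A rough sketch of a worker forwarding a key upload to the main process (the `hs` object and key payload are placeholders; in `rest/client/keys.py` the client is built once in `__init__`, as shown further below):

from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet

async def upload_via_main_process(hs, user_id: str, device_id: str, keys: dict) -> dict:
    # Awaiting the replication client POSTs to
    # /_synapse/replication/upload_keys_for_user on the main process.
    upload_keys = ReplicationUploadKeysForUserRestServlet.make_client(hs)
    result = await upload_keys(user_id=user_id, device_id=device_id, keys=keys)
    # The result mirrors the /_matrix/client/v3/keys/upload response, e.g.
    # {"one_time_key_counts": {"signed_curve25519": 20}}
    return result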
+0
-13
synapse/replication/slave/__init__.py
0 # Copyright 2016 OpenMarket Ltd
1 #
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
+0
-13
synapse/replication/slave/storage/__init__.py
0 # Copyright 2016 OpenMarket Ltd
1 #
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
+0
-50
synapse/replication/slave/storage/_slaved_id_tracker.py
0 # Copyright 2016 OpenMarket Ltd
1 #
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13 from typing import List, Optional, Tuple
14
15 from synapse.storage.database import LoggingDatabaseConnection
16 from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id
17
18
19 class SlavedIdTracker(AbstractStreamIdTracker):
20 """Tracks the "current" stream ID of a stream with a single writer.
21
22 See `AbstractStreamIdTracker` for more details.
23
24 Note that this class does not work correctly when there are multiple
25 writers.
26 """
27
28 def __init__(
29 self,
30 db_conn: LoggingDatabaseConnection,
31 table: str,
32 column: str,
33 extra_tables: Optional[List[Tuple[str, str]]] = None,
34 step: int = 1,
35 ):
36 self.step = step
37 self._current = _load_current_id(db_conn, table, column, step)
38 if extra_tables:
39 for table, column in extra_tables:
40 self.advance(None, _load_current_id(db_conn, table, column))
41
42 def advance(self, instance_name: Optional[str], new_id: int) -> None:
43 self._current = (max if self.step > 0 else min)(self._current, new_id)
44
45 def get_current_token(self) -> int:
46 return self._current
47
48 def get_current_token_for_writer(self, instance_name: str) -> int:
49 return self.get_current_token()
244244 self._parse_and_dispatch_line(line)
245245
246246 def _parse_and_dispatch_line(self, line: bytes) -> None:
247 if line.strip() == "":
247 if line.strip() == b"":
248248 # Ignore blank lines
249249 return
250250
237237 """
238238 Register all the admin servlets.
239239 """
240 # Admin servlets aren't registered on workers.
241 if hs.config.worker.worker_app is not None:
242 return
243
240244 register_servlets_for_client_rest_resource(hs, http_server)
241245 BlockRoomRestServlet(hs).register(http_server)
242246 ListRoomRestServlet(hs).register(http_server)
253257 UserTokenRestServlet(hs).register(http_server)
254258 UserRestServletV2(hs).register(http_server)
255259 UsersRestServletV2(hs).register(http_server)
256 DeviceRestServlet(hs).register(http_server)
257 DevicesRestServlet(hs).register(http_server)
258 DeleteDevicesRestServlet(hs).register(http_server)
259260 UserMediaStatisticsRestServlet(hs).register(http_server)
260261 EventReportDetailRestServlet(hs).register(http_server)
261262 EventReportsRestServlet(hs).register(http_server)
279280 UserByExternalId(hs).register(http_server)
280281 UserByThreePid(hs).register(http_server)
281282
282 # Some servlets only get registered for the main process.
283 if hs.config.worker.worker_app is None:
284 SendServerNoticeServlet(hs).register(http_server)
285 BackgroundUpdateEnabledRestServlet(hs).register(http_server)
286 BackgroundUpdateRestServlet(hs).register(http_server)
287 BackgroundUpdateStartJobRestServlet(hs).register(http_server)
283 DeviceRestServlet(hs).register(http_server)
284 DevicesRestServlet(hs).register(http_server)
285 DeleteDevicesRestServlet(hs).register(http_server)
286 SendServerNoticeServlet(hs).register(http_server)
287 BackgroundUpdateEnabledRestServlet(hs).register(http_server)
288 BackgroundUpdateRestServlet(hs).register(http_server)
289 BackgroundUpdateStartJobRestServlet(hs).register(http_server)
288290
289291
290292 def register_servlets_for_client_rest_resource(
293295 """Register only the servlets which need to be exposed on /_matrix/client/xxx"""
294296 WhoisRestServlet(hs).register(http_server)
295297 PurgeHistoryStatusRestServlet(hs).register(http_server)
296 DeactivateAccountRestServlet(hs).register(http_server)
297298 PurgeHistoryRestServlet(hs).register(http_server)
298 ResetPasswordRestServlet(hs).register(http_server)
299 # The following resources can only be run on the main process.
300 if hs.config.worker.worker_app is None:
301 DeactivateAccountRestServlet(hs).register(http_server)
302 ResetPasswordRestServlet(hs).register(http_server)
299303 SearchUsersRestServlet(hs).register(http_server)
300304 UserRegisterServlet(hs).register(http_server)
301305 AccountValidityRenewServlet(hs).register(http_server)
1515 from typing import TYPE_CHECKING, Tuple
1616
1717 from synapse.api.errors import NotFoundError, SynapseError
18 from synapse.handlers.device import DeviceHandler
1819 from synapse.http.servlet import (
1920 RestServlet,
2021 assert_params_in_dict,
4243 def __init__(self, hs: "HomeServer"):
4344 super().__init__()
4445 self.auth = hs.get_auth()
45 self.device_handler = hs.get_device_handler()
46 handler = hs.get_device_handler()
47 assert isinstance(handler, DeviceHandler)
48 self.device_handler = handler
4649 self.store = hs.get_datastores().main
4750 self.is_mine = hs.is_mine
4851
111114
112115 def __init__(self, hs: "HomeServer"):
113116 self.auth = hs.get_auth()
114 self.device_handler = hs.get_device_handler()
117 handler = hs.get_device_handler()
118 assert isinstance(handler, DeviceHandler)
119 self.device_handler = handler
115120 self.store = hs.get_datastores().main
116121 self.is_mine = hs.is_mine
117122
142147
143148 def __init__(self, hs: "HomeServer"):
144149 self.auth = hs.get_auth()
145 self.device_handler = hs.get_device_handler()
150 handler = hs.get_device_handler()
151 assert isinstance(handler, DeviceHandler)
152 self.device_handler = handler
146153 self.store = hs.get_datastores().main
147154 self.is_mine = hs.is_mine
148155
902902 @user:server/pushers
903903
904904 Returns:
905 pushers: Dictionary containing pushers information.
906 total: Number of pushers in dictionary `pushers`.
905 A dictionary with keys:
906 pushers: Dictionary containing pushers information.
907 total: Number of pushers in dictionary `pushers`.
907908 """
908909
909910 PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")
1919
2020 from synapse.api import errors
2121 from synapse.api.errors import NotFoundError
22 from synapse.handlers.device import DeviceHandler
2223 from synapse.http.server import HttpServer
2324 from synapse.http.servlet import (
2425 RestServlet,
7980 super().__init__()
8081 self.hs = hs
8182 self.auth = hs.get_auth()
82 self.device_handler = hs.get_device_handler()
83 handler = hs.get_device_handler()
84 assert isinstance(handler, DeviceHandler)
85 self.device_handler = handler
8386 self.auth_handler = hs.get_auth_handler()
8487
8588 class PostBody(RequestBodyModel):
124127 super().__init__()
125128 self.hs = hs
126129 self.auth = hs.get_auth()
127 self.device_handler = hs.get_device_handler()
130 handler = hs.get_device_handler()
131 assert isinstance(handler, DeviceHandler)
132 self.device_handler = handler
128133 self.auth_handler = hs.get_auth_handler()
129134 self._msc3852_enabled = hs.config.experimental.msc3852_enabled
130135
255260 super().__init__()
256261 self.hs = hs
257262 self.auth = hs.get_auth()
258 self.device_handler = hs.get_device_handler()
263 handler = hs.get_device_handler()
264 assert isinstance(handler, DeviceHandler)
265 self.device_handler = handler
259266
260267 async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
261268 requester = await self.auth.get_user_by_req(request)
312319 super().__init__()
313320 self.hs = hs
314321 self.auth = hs.get_auth()
315 self.device_handler = hs.get_device_handler()
322 handler = hs.get_device_handler()
323 assert isinstance(handler, DeviceHandler)
324 self.device_handler = handler
316325
317326 class PostBody(RequestBodyModel):
318327 device_id: StrictStr
2626 )
2727 from synapse.http.site import SynapseRequest
2828 from synapse.logging.opentracing import log_kv, set_tag
29 from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet
2930 from synapse.rest.client._base import client_patterns, interactive_auth_handler
3031 from synapse.types import JsonDict, StreamToken
3132 from synapse.util.cancellation import cancellable
4243 Content-Type: application/json
4344
4445 {
45 "device_keys": {
46 "user_id": "<user_id>",
47 "device_id": "<device_id>",
48 "valid_until_ts": <millisecond_timestamp>,
49 "algorithms": [
50 "m.olm.curve25519-aes-sha2",
51 ]
52 "keys": {
53 "<algorithm>:<device_id>": "<key_base64>",
46 "device_keys": {
47 "user_id": "<user_id>",
48 "device_id": "<device_id>",
49 "valid_until_ts": <millisecond_timestamp>,
50 "algorithms": [
51 "m.olm.curve25519-aes-sha2",
52 ]
53 "keys": {
54 "<algorithm>:<device_id>": "<key_base64>",
55 },
56 "signatures:" {
57 "<user_id>" {
58 "<algorithm>:<device_id>": "<signature_base64>"
59 }
60 }
5461 },
55 "signatures:" {
56 "<user_id>" {
57 "<algorithm>:<device_id>": "<signature_base64>"
58 } } },
59 "one_time_keys": {
60 "<algorithm>:<key_id>": "<key_base64>"
61 },
62 "fallback_keys": {
63 "<algorithm>:<device_id>": "<key_base64>",
64 "signed_<algorithm>:<device_id>": {
65 "fallback": true,
66 "key": "<key_base64>",
67 "signatures": {
68 "<user_id>": {
69 "<algorithm>:<device_id>": "<key_base64>"
70 }
71 }
72 }
73 }
74 "one_time_keys": {
75 "<algorithm>:<key_id>": "<key_base64>"
76 },
6277 }
78
79 response, e.g.:
80
81 {
82 "one_time_key_counts": {
83 "curve25519": 10,
84 "signed_curve25519": 20
85 }
86 }
87
6388 """
6489
6590 PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$")
6994 self.auth = hs.get_auth()
7095 self.e2e_keys_handler = hs.get_e2e_keys_handler()
7196 self.device_handler = hs.get_device_handler()
97
98 if hs.config.worker.worker_app is None:
99 # if main process
100 self.key_uploader = self.e2e_keys_handler.upload_keys_for_user
101 else:
102 # then a worker
103 self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs)
72104
73105 async def on_POST(
74106 self, request: SynapseRequest, device_id: Optional[str]
108140 400, "To upload keys, you must pass device_id when authenticating"
109141 )
110142
111 result = await self.e2e_keys_handler.upload_keys_for_user(
112 user_id, device_id, body
143 result = await self.key_uploader(
144 user_id=user_id, device_id=device_id, keys=body
113145 )
114146 return 200, result
115147
349349 auth_provider_session_id: The session ID got during login from the SSO IdP.
350350
351351 Returns:
352 result: Dictionary of account information after successful login.
352 Dictionary of account information after successful login.
353353 """
354354
355355 # Before we actually log them in we check if they've already logged in
1414 import logging
1515 from typing import TYPE_CHECKING, Tuple
1616
17 from synapse.handlers.device import DeviceHandler
1718 from synapse.http.server import HttpServer
1819 from synapse.http.servlet import RestServlet
1920 from synapse.http.site import SynapseRequest
3334 super().__init__()
3435 self.auth = hs.get_auth()
3536 self._auth_handler = hs.get_auth_handler()
36 self._device_handler = hs.get_device_handler()
37 handler = hs.get_device_handler()
38 assert isinstance(handler, DeviceHandler)
39 self._device_handler = handler
3740
3841 async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
3942 requester = await self.auth.get_user_by_req(request, allow_expired=True)
5861 super().__init__()
5962 self.auth = hs.get_auth()
6063 self._auth_handler = hs.get_auth_handler()
61 self._device_handler = hs.get_device_handler()
64 handler = hs.get_device_handler()
65 assert isinstance(handler, DeviceHandler)
66 self._device_handler = handler
6267
6368 async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
6469 requester = await self.auth.get_user_by_req(request, allow_expired=True)
12831283 `dir` can be `f` or `b` to indicate forwards and backwards in time from the
12841284 given timestamp.
12851285
1286 GET /_matrix/client/unstable/org.matrix.msc3030/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
1286 GET /_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>
12871287 {
12881288 "event_id": ...
12891289 }
12901290 """
12911291
12921292 PATTERNS = (
1293 re.compile(
1294 "^/_matrix/client/unstable/org.matrix.msc3030"
1295 "/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"
1296 ),
1293 re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"),
12971294 )
12981295
12991296 def __init__(self, hs: "HomeServer"):
14201417 RoomAliasListServlet(hs).register(http_server)
14211418 SearchRestServlet(hs).register(http_server)
14221419 RoomCreateRestServlet(hs).register(http_server)
1423 if hs.config.experimental.msc3030_enabled:
1424 TimestampLookupRestServlet(hs).register(http_server)
1420 TimestampLookupRestServlet(hs).register(http_server)
14251421
14261422 # Some servlets only get registered for the main process.
14271423 if not is_worker:
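With MSC3030 now stable, `TimestampLookupRestServlet` is registered unconditionally and serves the `/v1` path shown in its docstring above. A hedged example of querying it with the `requests` library (homeserver, room ID and access token are placeholders):

import requests

resp = requests.get(
    "https://matrix.example.org/_matrix/client/v1/rooms/!room:example.org/timestamp_to_event",
    params={"ts": 1667304000000, "dir": "f"},  # milliseconds since the epoch, search forwards
    headers={"Authorization": "Bearer <access_token>"},
)
print(resp.json())  # e.g. {"event_id": "$someevent"}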
100100 "org.matrix.msc3827.stable": True,
101101 # Adds support for importing historical messages as per MSC2716
102102 "org.matrix.msc2716": self.config.experimental.msc2716_enabled,
103 # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030
104 "org.matrix.msc3030": self.config.experimental.msc3030_enabled,
105103 # Adds support for thread relations, per MSC3440.
106104 "org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above
107105 # Support for thread read receipts & notification counts.
343343 download from remote server.
344344
345345 Args:
346 server_name (str): Remote server_name where the media originated.
347 media_id (str): The media ID of the content (as defined by the
346 server_name: Remote server_name where the media originated.
347 media_id: The media ID of the content (as defined by the
348348 remote server).
349349
350350 Returns:
137137 """Rescales the image to the given dimensions.
138138
139139 Returns:
140 BytesIO: the bytes of the encoded image ready to be written to disk
140 The bytes of the encoded image ready to be written to disk
141141 """
142142 with self._resize(width, height) as scaled:
143143 return self._encode_image(scaled, output_type)
154154 max_height: The largest possible height.
155155
156156 Returns:
157 BytesIO: the bytes of the encoded image ready to be written to disk
157 The bytes of the encoded image ready to be written to disk
158158 """
159159 if width * self.height > height * self.width:
160160 scaled_width = width
509509 )
510510
511511 @cache_in_self
512 def get_device_handler(self):
512 def get_device_handler(self) -> DeviceWorkerHandler:
513513 if self.config.worker.worker_app:
514514 return DeviceWorkerHandler(self)
515515 else:
112112 """Deep-copy a structure, carrying out string substitutions on any strings
113113
114114 Args:
115 x (object): structure to be copied
116 substitutions (object): substitutions to be made - passed into the
117 string '%' operator
115 x: structure to be copied
116 substitutions: substitutions to be made - passed into the string '%' operator
118117
119118 Returns:
120119 copy of x
169169 room_id: The room id of the server notices room
170170
171171 Returns:
172 bool: Is the room currently blocked
173 list: The list of pinned event IDs that are unrelated to limit blocking
174 This list can be used as a convenience in the case where the block
175 is to be lifted and the remaining pinned event references need to be
176 preserved
172 Tuple of:
173 Is the room currently blocked
174
175 The list of pinned event IDs that are unrelated to limit blocking
176 This list can be used as a convenience in the case where the block
177 is to be lifted and the remaining pinned event references need to be
178 preserved
177179 """
178180 currently_blocked = False
179181 pinned_state_event = None
189189 room_id: str,
190190 event_ids: Collection[str],
191191 state_filter: Optional[StateFilter] = None,
192 await_full_state: bool = True,
192193 ) -> StateMap[str]:
193194 """Fetch the state after each of the given event IDs. Resolve them and return.
194195
199200 Args:
200201 room_id: the room_id containing the given events.
201202 event_ids: the events whose state should be fetched and resolved.
203 await_full_state: if `True`, will block if we do not yet have complete state
204 at the given `event_id`s, regardless of whether `state_filter` is
205 satisfied by partial state.
202206
203207 Returns:
204208 the state dict (a mapping from (event_type, state_key) -> event_id) which
205209 holds the resolution of the states after the given event IDs.
206210 """
207211 logger.debug("calling resolve_state_groups from compute_state_after_events")
208 ret = await self.resolve_state_groups_for_events(room_id, event_ids)
212 ret = await self.resolve_state_groups_for_events(
213 room_id, event_ids, await_full_state
214 )
209215 return await ret.get_state(self._state_storage_controller, state_filter)
210216
211217 async def get_current_user_ids_in_room(
203203 process to do so, calling the per_item_callback for each item.
204204
205205 Args:
206 room_id (str):
207 task (_EventPersistQueueTask): A _PersistEventsTask or
208 _UpdateCurrentStateTask to process.
206 room_id:
207 task: A _PersistEventsTask or _UpdateCurrentStateTask to process.
209208
210209 Returns:
211210 the result returned by the `_per_item_callback` passed to
568568 retcols=["update_name"],
569569 desc="check_background_updates",
570570 )
571 updates = [x["update_name"] for x in updates]
571 background_update_names = [x["update_name"] for x in updates]
572572
573573 for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items():
574 if update_name not in updates:
574 if update_name not in background_update_names:
575575 logger.debug("Now safe to upsert in %s", table)
576576 self._unsafe_to_upsert_tables.discard(table)
577577
578578 # If there's any updates still running, reschedule to run.
579 if updates:
579 if background_update_names:
580580 self._clock.call_later(
581581 15.0,
582582 run_as_background_process,
11281128 values: Dict[str, Any],
11291129 insertion_values: Optional[Dict[str, Any]] = None,
11301130 desc: str = "simple_upsert",
1131 lock: bool = True,
11321131 ) -> bool:
11331132 """Insert a row with values + insertion_values; on conflict, update with values.
11341133
11531152 requiring that a unique index exist on the column names used to detect a
11541153 conflict (i.e. `keyvalues.keys()`).
11551154
1156 If there is no such index, we can "emulate" an upsert with a SELECT followed
1157 by either an INSERT or an UPDATE. This is unsafe: we cannot make the same
1158 atomicity guarantees that a native upsert can and are very vulnerable to races
1159 and crashes. Therefore if we wish to upsert without an appropriate unique index,
1160 we must either:
1161
1162 1. Acquire a table-level lock before the emulated upsert (`lock=True`), or
1163 2. VERY CAREFULLY ensure that we are the only thread and worker which will be
1164 writing to this table, in which case we can proceed without a lock
1165 (`lock=False`).
1166
1167 Generally speaking, you should use `lock=True`. If the table in question has a
1168 unique index[*], this class will use a native upsert (which is atomic and so can
1169 ignore the `lock` argument). Otherwise this class will use an emulated upsert,
1170 in which case we want the safer option unless we been VERY CAREFUL.
1155 If there is no such index yet[*], we can "emulate" an upsert with a SELECT
1156 followed by either an INSERT or an UPDATE. This is unsafe unless *all* upserters
1157 run at the SERIALIZABLE isolation level: we cannot make the same atomicity
1158 guarantees that a native upsert can and are very vulnerable to races and
1159 crashes. Therefore to upsert without an appropriate unique index, we acquire a
1160 table-level lock before the emulated upsert.
11711161
11721162 [*]: Some tables have unique indices added to them in the background. Those
11731163 tables `T` are keys in the dictionary UNIQUE_INDEX_BACKGROUND_UPDATES,
11881178 values: The nonunique columns and their new values
11891179 insertion_values: additional key/values to use only when inserting
11901180 desc: description of the transaction, for logging and metrics
1191 lock: True to lock the table when doing the upsert.
11921181 Returns:
11931182 Returns True if a row was inserted or updated (i.e. if `values` is
11941183 not empty then this always returns True)
12081197 keyvalues,
12091198 values,
12101199 insertion_values,
1211 lock=lock,
12121200 db_autocommit=autocommit,
12131201 )
12141202 except self.engine.module.IntegrityError as e:
12311219 values: Dict[str, Any],
12321220 insertion_values: Optional[Dict[str, Any]] = None,
12331221 where_clause: Optional[str] = None,
1234 lock: bool = True,
12351222 ) -> bool:
12361223 """
12371224 Pick the UPSERT method which works best on the platform. Either the
12441231 values: The nonunique columns and their new values
12451232 insertion_values: additional key/values to use only when inserting
12461233 where_clause: An index predicate to apply to the upsert.
1247 lock: True to lock the table when doing the upsert. Unused when performing
1248 a native upsert.
12491234 Returns:
12501235 Returns True if a row was inserted or updated (i.e. if `values` is
12511236 not empty then this always returns True)
12691254 values,
12701255 insertion_values=insertion_values,
12711256 where_clause=where_clause,
1272 lock=lock,
12731257 )
12741258
12751259 def simple_upsert_txn_emulated(
12901274 insertion_values: additional key/values to use only when inserting
12911275 where_clause: An index predicate to apply to the upsert.
12921276 lock: True to lock the table when doing the upsert.
1277 Must not be False unless the table has already been locked.
12931278 Returns:
12941279 Returns True if a row was inserted or updated (i.e. if `values` is
12951280 not empty then this always returns True)
12961281 """
12971282 insertion_values = insertion_values or {}
12981283
1299 # We need to lock the table :(, unless we're *really* careful
13001284 if lock:
1285 # We need to lock the table :(
13011286 self.engine.lock_table(txn, table)
13021287
13031288 def _getwhere(key: str) -> str:
14051390 value_names: Collection[str],
14061391 value_values: Collection[Collection[Any]],
14071392 desc: str,
1408 lock: bool = True,
14091393 ) -> None:
14101394 """
14111395 Upsert, many times.
14171401 value_names: The value column names
14181402 value_values: A list of each row's value column values.
14191403 Ignored if value_names is empty.
1420 lock: True to lock the table when doing the upsert. Unused when performing
1421 a native upsert.
14221404 """
14231405
14241406 # We can autocommit if it safe to upsert
14321414 key_values,
14331415 value_names,
14341416 value_values,
1435 lock=lock,
14361417 db_autocommit=autocommit,
14371418 )
14381419
14441425 key_values: Collection[Iterable[Any]],
14451426 value_names: Collection[str],
14461427 value_values: Iterable[Iterable[Any]],
1447 lock: bool = True,
14481428 ) -> None:
14491429 """
14501430 Upsert, many times.
14561436 value_names: The value column names
14571437 value_values: A list of each row's value column values.
14581438 Ignored if value_names is empty.
1459 lock: True to lock the table when doing the upsert. Unused when performing
1460 a native upsert.
14611439 """
14621440 if table not in self._unsafe_to_upsert_tables:
14631441 return self.simple_upsert_many_txn_native_upsert(
14651443 )
14661444 else:
14671445 return self.simple_upsert_many_txn_emulated(
1468 txn, table, key_names, key_values, value_names, value_values, lock=lock
1446 txn,
1447 table,
1448 key_names,
1449 key_values,
1450 value_names,
1451 value_values,
14691452 )
14701453
14711454 def simple_upsert_many_txn_emulated(
14761459 key_values: Collection[Iterable[Any]],
14771460 value_names: Collection[str],
14781461 value_values: Iterable[Iterable[Any]],
1479 lock: bool = True,
14801462 ) -> None:
14811463 """
14821464 Upsert, many times, but without native UPSERT support or batching.
14881470 value_names: The value column names
14891471 value_values: A list of each row's value column values.
14901472 Ignored if value_names is empty.
1491 lock: True to lock the table when doing the upsert.
14921473 """
14931474 # No value columns, therefore make a blank list so that the following
14941475 # zip() works correctly.
14951476 if not value_names:
14961477 value_values = [() for x in range(len(key_values))]
14971478
1498 if lock:
1499 # Lock the table just once, to prevent it being done once per row.
1500 # Note that, according to Postgres' documentation, once obtained,
1501 # the lock is held for the remainder of the current transaction.
1502 self.engine.lock_table(txn, "user_ips")
1479 # Lock the table just once, to prevent it being done once per row.
1480 # Note that, according to Postgres' documentation, once obtained,
1481 # the lock is held for the remainder of the current transaction.
1482 self.engine.lock_table(txn, "user_ips")
15031483
15041484 for keyv, valv in zip(key_values, value_values):
15051485 _keys = {x: y for x, y in zip(key_names, keyv)}
20742054 retcols: Collection[str],
20752055 allow_none: bool = False,
20762056 ) -> Optional[Dict[str, Any]]:
2077 select_sql = "SELECT %s FROM %s WHERE %s" % (
2078 ", ".join(retcols),
2079 table,
2080 " AND ".join("%s = ?" % (k,) for k in keyvalues),
2081 )
2082
2083 txn.execute(select_sql, list(keyvalues.values()))
2057 select_sql = "SELECT %s FROM %s" % (", ".join(retcols), table)
2058
2059 if keyvalues:
2060 select_sql += " WHERE %s" % (" AND ".join("%s = ?" % k for k in keyvalues),)
2061 txn.execute(select_sql, list(keyvalues.values()))
2062 else:
2063 txn.execute(select_sql)
2064
20842065 row = txn.fetchone()
20852066
20862067 if not row:
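The reworked `simple_upsert` docstring above explains the emulated path taken when a table has no suitable unique index yet: take a table-level lock, try an UPDATE, and fall back to an INSERT. A simplified, self-contained sketch of that control flow against a DB-API cursor (table and column names are placeholders, not Synapse's schema; the `insertion_values` and index `where_clause` handled by the real `simple_upsert_txn_emulated` are omitted):

def emulated_upsert_sketch(txn, table: str, keyvalues: dict, values: dict) -> bool:
    # The caller is assumed to already hold a table-level lock, as required above.
    where = " AND ".join("%s = ?" % k for k in keyvalues)

    if values:
        update_sql = "UPDATE %s SET %s WHERE %s" % (
            table,
            ", ".join("%s = ?" % k for k in values),
            where,
        )
        txn.execute(update_sql, list(values.values()) + list(keyvalues.values()))
        if txn.rowcount > 0:
            return True  # an existing row was updated
    else:
        # No non-key columns to update: just check whether the row exists.
        txn.execute("SELECT 1 FROM %s WHERE %s" % (table, where), list(keyvalues.values()))
        if txn.fetchone() is not None:
            return False

    # No matching row: insert the key columns and value columns together.
    cols = list(keyvalues) + list(values)
    insert_sql = "INSERT INTO %s (%s) VALUES (%s)" % (
        table,
        ", ".join(cols),
        ", ".join("?" for _ in cols),
    )
    txn.execute(insert_sql, list(keyvalues.values()) + list(values.values()))
    return True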
2626 )
2727
2828 from synapse.api.constants import AccountDataTypes
29 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
3029 from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream
3130 from synapse.storage._base import db_to_json
3231 from synapse.storage.database import (
6766 # to write account data. A value of `True` implies that `_account_data_id_gen`
6867 # is an `AbstractStreamIdGenerator` and not just a tracker.
6968 self._account_data_id_gen: AbstractStreamIdTracker
69 self._can_write_to_account_data = (
70 self._instance_name in hs.config.worker.writers.account_data
71 )
7072
7173 if isinstance(database.engine, PostgresEngine):
72 self._can_write_to_account_data = (
73 self._instance_name in hs.config.worker.writers.account_data
74 )
75
7674 self._account_data_id_gen = MultiWriterIdGenerator(
7775 db_conn=db_conn,
7876 db=database,
9492 # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
9593 # updated over replication. (Multiple writers are not supported for
9694 # SQLite).
97 if self._instance_name in hs.config.worker.writers.account_data:
98 self._can_write_to_account_data = True
99 self._account_data_id_gen = StreamIdGenerator(
100 db_conn,
101 "room_account_data",
102 "stream_id",
103 extra_tables=[("room_tags_revisions", "stream_id")],
104 )
105 else:
106 self._account_data_id_gen = SlavedIdTracker(
107 db_conn,
108 "room_account_data",
109 "stream_id",
110 extra_tables=[("room_tags_revisions", "stream_id")],
111 )
95 self._account_data_id_gen = StreamIdGenerator(
96 db_conn,
97 "room_account_data",
98 "stream_id",
99 extra_tables=[("room_tags_revisions", "stream_id")],
100 is_writer=self._instance_name in hs.config.worker.writers.account_data,
101 )
112102
113103 account_max = self.get_max_account_data_stream_id()
114104 self._account_data_stream_cache = StreamChangeCache(
458448 content_json = json_encoder.encode(content)
459449
460450 async with self._account_data_id_gen.get_next() as next_id:
461 # no need to lock here as room_account_data has a unique constraint
462 # on (user_id, room_id, account_data_type) so simple_upsert will
463 # retry if there is a conflict.
464451 await self.db_pool.simple_upsert(
465452 desc="add_room_account_data",
466453 table="room_account_data",
470457 "account_data_type": account_data_type,
471458 },
472459 values={"stream_id": next_id, "content": content_json},
473 lock=False,
474460 )
475461
476462 self._account_data_stream_cache.entity_has_changed(user_id, next_id)
526512 ) -> None:
527513 content_json = json_encoder.encode(content)
528514
529 # no need to lock here as account_data has a unique constraint on
530 # (user_id, account_data_type) so simple_upsert will retry if
531 # there is a conflict.
532515 self.db_pool.simple_upsert_txn(
533516 txn,
534517 table="account_data",
535518 keyvalues={"user_id": user_id, "account_data_type": account_data_type},
536519 values={"stream_id": next_id, "content": content_json},
537 lock=False,
538520 )
539521
540522 # Ignored users get denormalized into a separate table as an optimisation.
1919 ApplicationService,
2020 ApplicationServiceState,
2121 AppServiceTransaction,
22 TransactionOneTimeKeyCounts,
22 TransactionOneTimeKeysCount,
2323 TransactionUnusedFallbackKeys,
2424 )
2525 from synapse.config.appservice import load_appservices
259259 events: List[EventBase],
260260 ephemeral: List[JsonDict],
261261 to_device_messages: List[JsonDict],
262 one_time_key_counts: TransactionOneTimeKeyCounts,
262 one_time_keys_count: TransactionOneTimeKeysCount,
263263 unused_fallback_keys: TransactionUnusedFallbackKeys,
264264 device_list_summary: DeviceListUpdates,
265265 ) -> AppServiceTransaction:
272272 events: A list of persistent events to put in the transaction.
273273 ephemeral: A list of ephemeral events to put in the transaction.
274274 to_device_messages: A list of to-device messages to put in the transaction.
275 one_time_key_counts: Counts of remaining one-time keys for relevant
275 one_time_keys_count: Counts of remaining one-time keys for relevant
276276 appservice devices in the transaction.
277277 unused_fallback_keys: Lists of unused fallback keys for relevant
278278 appservice devices in the transaction.
298298 events=events,
299299 ephemeral=ephemeral,
300300 to_device_messages=to_device_messages,
301 one_time_key_counts=one_time_key_counts,
301 one_time_keys_count=one_time_keys_count,
302302 unused_fallback_keys=unused_fallback_keys,
303303 device_list_summary=device_list_summary,
304304 )
378378 events=events,
379379 ephemeral=[],
380380 to_device_messages=[],
381 one_time_key_counts={},
381 one_time_keys_count={},
382382 unused_fallback_keys={},
383383 device_list_summary=DeviceListUpdates(),
384384 )
450450 table="application_services_state",
451451 keyvalues={"as_id": service.id},
452452 values={f"{stream_type}_stream_id": pos},
453 # no need to lock when emulating upsert: as_id is a unique key
454 lock=False,
455453 desc="set_appservice_stream_type_pos",
456454 )
457455
258258
259259 if relates_to:
260260 self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,))
261 self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,))
261262 self._attempt_to_invalidate_cache(
262263 "get_aggregation_groups_for_event", (relates_to,)
263264 )
3737 whitelisted_homeserver,
3838 )
3939 from synapse.metrics.background_process_metrics import wrap_as_background_process
40 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
4140 from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
4241 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
4342 from synapse.storage.database import (
8584 ):
8685 super().__init__(database, db_conn, hs)
8786
88 if hs.config.worker.worker_app is None:
89 self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
90 db_conn,
91 "device_lists_stream",
92 "stream_id",
93 extra_tables=[
94 ("user_signature_stream", "stream_id"),
95 ("device_lists_outbound_pokes", "stream_id"),
96 ("device_lists_changes_in_room", "stream_id"),
97 ],
98 )
99 else:
100 self._device_list_id_gen = SlavedIdTracker(
101 db_conn,
102 "device_lists_stream",
103 "stream_id",
104 extra_tables=[
105 ("user_signature_stream", "stream_id"),
106 ("device_lists_outbound_pokes", "stream_id"),
107 ("device_lists_changes_in_room", "stream_id"),
108 ],
109 )
87 # In the worker store this is an ID tracker which we overwrite in the non-worker
88 # class below that is used on the main process.
89 self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
90 db_conn,
91 "device_lists_stream",
92 "stream_id",
93 extra_tables=[
94 ("user_signature_stream", "stream_id"),
95 ("device_lists_outbound_pokes", "stream_id"),
96 ("device_lists_changes_in_room", "stream_id"),
97 ],
98 is_writer=hs.config.worker.worker_app is None,
99 )
110100
111101 # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a
112102 # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker).
534524 limit: Maximum number of device updates to return
535525
536526 Returns:
537 List: List of device update tuples:
527 List of device update tuples:
538528 - user_id
539529 - device_id
540530 - stream_id
14501440 self._remove_duplicate_outbound_pokes,
14511441 )
14521442
1443 self.db_pool.updates.register_background_index_update(
1444 "device_lists_changes_in_room_by_room_index",
1445 index_name="device_lists_changes_in_room_by_room_idx",
1446 table="device_lists_changes_in_room",
1447 columns=["room_id", "stream_id"],
1448 )
1449
14531450 async def _drop_device_list_streams_non_unique_indexes(
14541451 self, progress: JsonDict, batch_size: int
14551452 ) -> int:
17461743 table="device_lists_remote_cache",
17471744 keyvalues={"user_id": user_id, "device_id": device_id},
17481745 values={"content": json_encoder.encode(content)},
1749 # we don't need to lock, because we assume we are the only thread
1750 # updating this user's devices.
1751 lock=False,
17521746 )
17531747
17541748 txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id))
17621756 table="device_lists_remote_extremeties",
17631757 keyvalues={"user_id": user_id},
17641758 values={"stream_id": stream_id},
1765 # again, we can assume we are the only thread updating this user's
1766 # extremity.
1767 lock=False,
17681759 )
17691760
17701761 async def update_remote_device_list_cache(
18171808 table="device_lists_remote_extremeties",
18181809 keyvalues={"user_id": user_id},
18191810 values={"stream_id": stream_id},
1820 # we don't need to lock, because we can assume we are the only thread
1821 # updating this user's extremity.
1822 lock=False,
18231811 )
18241812
18251813 async def add_device_change_to_streams(
20172005 )
20182006
20192007 async def get_uncoverted_outbound_room_pokes(
2020 self, limit: int = 10
2008 self, start_stream_id: int, start_room_id: str, limit: int = 10
20212009 ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
20222010 """Get device list changes by room that have not yet been handled and
20232011 written to `device_lists_outbound_pokes`.
20242012
2013 Args:
2014 start_stream_id: Together with `start_room_id`, indicates the position after
2015 which to return device list changes.
2016 start_room_id: Together with `start_stream_id`, indicates the position after
2017 which to return device list changes.
2018 limit: The maximum number of device list changes to return.
2019
20252020 Returns:
2026 A list of user ID, device ID, room ID, stream ID and optional opentracing context.
2021 A list of user ID, device ID, room ID, stream ID and optional opentracing
2022 context, in order of ascending (stream ID, room ID).
20272023 """
20282024
20292025 sql = """
20302026 SELECT user_id, device_id, room_id, stream_id, opentracing_context
20312027 FROM device_lists_changes_in_room
2032 WHERE NOT converted_to_destinations
2033 ORDER BY stream_id
2028 WHERE
2029 (stream_id, room_id) > (?, ?) AND
2030 stream_id <= ? AND
2031 NOT converted_to_destinations
2032 ORDER BY stream_id ASC, room_id ASC
20342033 LIMIT ?
20352034 """
20362035
20372036 def get_uncoverted_outbound_room_pokes_txn(
20382037 txn: LoggingTransaction,
20392038 ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]:
2040 txn.execute(sql, (limit,))
2039 txn.execute(
2040 sql,
2041 (
2042 start_stream_id,
2043 start_room_id,
2044 # Avoid returning rows if there may be uncommitted device list
2045 # changes with smaller stream IDs.
2046 self._device_list_id_gen.get_current_token(),
2047 limit,
2048 ),
2049 )
20412050
20422051 return [
20432052 (
20592068 user_id: str,
20602069 device_id: str,
20612070 room_id: str,
2062 stream_id: Optional[int],
20632071 hosts: Collection[str],
20642072 context: Optional[Dict[str, str]],
20652073 ) -> None:
20662074 """Queue the device update to be sent to the given set of hosts,
20672075 calculated from the room ID.
2068
2069 Marks the associated row in `device_lists_changes_in_room` as handled,
2070 if `stream_id` is provided.
2071 """
2076 """
2077 if not hosts:
2078 return
20722079
20732080 def add_device_list_outbound_pokes_txn(
20742081 txn: LoggingTransaction, stream_ids: List[int]
20752082 ) -> None:
2076 if hosts:
2077 self._add_device_outbound_poke_to_stream_txn(
2078 txn,
2079 user_id=user_id,
2080 device_id=device_id,
2081 hosts=hosts,
2082 stream_ids=stream_ids,
2083 context=context,
2084 )
2085
2086 if stream_id:
2087 self.db_pool.simple_update_txn(
2088 txn,
2089 table="device_lists_changes_in_room",
2090 keyvalues={
2091 "user_id": user_id,
2092 "device_id": device_id,
2093 "stream_id": stream_id,
2094 "room_id": room_id,
2095 },
2096 updatevalues={"converted_to_destinations": True},
2097 )
2098
2099 if not hosts:
2100 # If there are no hosts then we don't try and generate stream IDs.
2101 return await self.db_pool.runInteraction(
2102 "add_device_list_outbound_pokes",
2103 add_device_list_outbound_pokes_txn,
2104 [],
2083 self._add_device_outbound_poke_to_stream_txn(
2084 txn,
2085 user_id=user_id,
2086 device_id=device_id,
2087 hosts=hosts,
2088 stream_ids=stream_ids,
2089 context=context,
21052090 )
21062091
21072092 async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids:
21652150 "get_pending_remote_device_list_updates_for_room",
21662151 get_pending_remote_device_list_updates_for_room_txn,
21672152 )
2153
2154 async def get_device_change_last_converted_pos(self) -> Tuple[int, str]:
2155 """
2156 Get the position of the last row in `device_lists_changes_in_room` that has been
2157 converted to `device_lists_outbound_pokes`.
2158
2159 Rows with a strictly greater position where `converted_to_destinations` is
2160 `FALSE` have not been converted.
2161 """
2162
2163 row = await self.db_pool.simple_select_one(
2164 table="device_lists_changes_converted_stream_position",
2165 keyvalues={},
2166 retcols=["stream_id", "room_id"],
2167 desc="get_device_change_last_converted_pos",
2168 )
2169 return row["stream_id"], row["room_id"]
2170
2171 async def set_device_change_last_converted_pos(
2172 self,
2173 stream_id: int,
2174 room_id: str,
2175 ) -> None:
2176 """
2177 Set the position of the last row in `device_lists_changes_in_room` that has been
2178 converted to `device_lists_outbound_pokes`.
2179 """
2180
2181 await self.db_pool.simple_update_one(
2182 table="device_lists_changes_converted_stream_position",
2183 keyvalues={},
2184 updatevalues={"stream_id": stream_id, "room_id": room_id},
2185 desc="set_device_change_last_converted_pos",
2186 )
390390 Returns:
391391 A dict giving the info metadata for this backup version, with
392392 fields including:
393 version(str)
394 algorithm(str)
395 auth_data(object): opaque dict supplied by the client
396 etag(int): tag of the keys in the backup
393 version (str)
394 algorithm (str)
395 auth_data (object): opaque dict supplied by the client
396 etag (int): tag of the keys in the backup
397397 """
398398
399399 def _get_e2e_room_keys_version_info_txn(txn: LoggingTransaction) -> JsonDict:
3232
3333 from synapse.api.constants import DeviceKeyAlgorithms
3434 from synapse.appservice import (
35 TransactionOneTimeKeyCounts,
35 TransactionOneTimeKeysCount,
3636 TransactionUnusedFallbackKeys,
3737 )
3838 from synapse.logging.opentracing import log_kv, set_tag, trace
411411 """Retrieve a number of one-time keys for a user
412412
413413 Args:
414 user_id(str): id of user to get keys for
415 device_id(str): id of device to get keys for
416 key_ids(list[str]): list of key ids (excluding algorithm) to
417 retrieve
414 user_id: id of user to get keys for
415 device_id: id of device to get keys for
416 key_ids: list of key ids (excluding algorithm) to retrieve
418417
419418 Returns:
420419 A map from (algorithm, key_id) to json string for key
514513
515514 async def count_bulk_e2e_one_time_keys_for_as(
516515 self, user_ids: Collection[str]
517 ) -> TransactionOneTimeKeyCounts:
516 ) -> TransactionOneTimeKeysCount:
518517 """
519518 Counts, in bulk, the one-time keys for all the users specified.
520519 Intended to be used by application services for populating OTK counts in
528527
529528 def _count_bulk_e2e_one_time_keys_txn(
530529 txn: LoggingTransaction,
531 ) -> TransactionOneTimeKeyCounts:
530 ) -> TransactionOneTimeKeysCount:
532531 user_in_where_clause, user_parameters = make_in_list_sql_clause(
533532 self.database_engine, "user_id", user_ids
534533 )
541540 """
542541 txn.execute(sql, user_parameters)
543542
544 result: TransactionOneTimeKeyCounts = {}
543 result: TransactionOneTimeKeysCount = {}
545544
546545 for user_id, device_id, algorithm, count in txn:
547546 # We deliberately construct empty dictionaries for
16851685 },
16861686 insertion_values={},
16871687 desc="insert_insertion_extremity",
1688 lock=False,
16891688 )
16901689
16911690 async def insert_received_event_to_staging(
12781278 Pick the earliest non-outlier if there is one, else the earliest one.
12791279
12801280 Args:
1281 events_and_contexts (list[(EventBase, EventContext)]):
1281 events_and_contexts:
1282
12821283 Returns:
1283 list[(EventBase, EventContext)]: filtered list
1284 filtered list
12841285 """
12851286 new_events_and_contexts: OrderedDict[
12861287 str, Tuple[EventBase, EventContext]
13061307 """Update min_depth for each room
13071308
13081309 Args:
1309 txn (twisted.enterprise.adbapi.Connection): db connection
1310 events_and_contexts (list[(EventBase, EventContext)]): events
1311 we are persisting
1310 txn: db connection
1311 events_and_contexts: events we are persisting
13121312 """
13131313 depth_updates: Dict[str, int] = {}
13141314 for event, context in events_and_contexts:
15791579 """Update all the miscellaneous tables for new events
15801580
15811581 Args:
1582 txn (twisted.enterprise.adbapi.Connection): db connection
1583 events_and_contexts (list[(EventBase, EventContext)]): events
1584 we are persisting
1585 all_events_and_contexts (list[(EventBase, EventContext)]): all
1586 events that we were going to persist. This includes events
1587 we've already persisted, etc, that wouldn't appear in
1588 events_and_context.
1582 txn: db connection
1583 events_and_contexts: events we are persisting
1584 all_events_and_contexts: all events that we were going to persist.
1585 This includes events we've already persisted, etc, that wouldn't
1586 appear in events_and_context.
15891587 inhibit_local_membership_updates: Stop the local_current_membership
15901588 from being updated by these events. This should be set to True
15911589 for backfilled events because backfilled events in the past do
20502048 self.store._invalidate_cache_and_stream(
20512049 txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,)
20522050 )
2051 if rel_type == RelationTypes.REFERENCE:
2052 self.store._invalidate_cache_and_stream(
2053 txn, self.store.get_references_for_event, (redacted_relates_to,)
2054 )
20532055 if rel_type == RelationTypes.REPLACE:
20542056 self.store._invalidate_cache_and_stream(
20552057 txn, self.store.get_applicable_edit, (redacted_relates_to,)
5858 run_as_background_process,
5959 wrap_as_background_process,
6060 )
61 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
6261 from synapse.replication.tcp.streams import BackfillStream
6362 from synapse.replication.tcp.streams.events import EventsStream
6463 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
212211 # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
213212 # updated over replication. (Multiple writers are not supported for
214213 # SQLite).
215 if hs.get_instance_name() in hs.config.worker.writers.events:
216 self._stream_id_gen = StreamIdGenerator(
217 db_conn,
218 "events",
219 "stream_ordering",
220 )
221 self._backfill_id_gen = StreamIdGenerator(
222 db_conn,
223 "events",
224 "stream_ordering",
225 step=-1,
226 extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
227 )
228 else:
229 self._stream_id_gen = SlavedIdTracker(
230 db_conn, "events", "stream_ordering"
231 )
232 self._backfill_id_gen = SlavedIdTracker(
233 db_conn, "events", "stream_ordering", step=-1
234 )
214 self._stream_id_gen = StreamIdGenerator(
215 db_conn,
216 "events",
217 "stream_ordering",
218 is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
219 )
220 self._backfill_id_gen = StreamIdGenerator(
221 db_conn,
222 "events",
223 "stream_ordering",
224 step=-1,
225 extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
226 is_writer=hs.get_instance_name() in hs.config.worker.writers.events,
227 )
235228
236229 events_max = self._stream_id_gen.get_current_token()
237230 curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict(
15881581 room_id: The room ID to query.
15891582
15901583 Returns:
1591 dict[str:float] of complexity version to complexity.
1584 Map of complexity version to complexity.
15921585 """
15931586 state_events = await self.get_current_state_event_counts(room_id)
15941587
216216 def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None:
217217 """
218218 Args:
219 reserved_users (tuple): reserved users to preserve
219 reserved_users: reserved users to preserve
220220 """
221221
222222 thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)
369369 should not appear in the MAU stats).
370370
371371 Args:
372 txn (cursor):
373 user_id (str): user to add/update
372 txn:
373 user_id: user to add/update
374374 """
375375 assert (
376376 self._update_on_this_worker
400400 add the user to the monthly active tables
401401
402402 Args:
403 user_id(str): the user_id to query
403 user_id: the user_id to query
404404 """
405405 assert (
406406 self._update_on_this_worker
2929
3030 from synapse.api.errors import StoreError
3131 from synapse.config.homeserver import ExperimentalConfig
32 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
3332 from synapse.replication.tcp.streams import PushRulesStream
3433 from synapse.storage._base import SQLBaseStore
3534 from synapse.storage.database import (
8483 push_rules = PushRules(ruleslist)
8584
8685 filtered_rules = FilteredPushRules(
87 push_rules, enabled_map, msc3664_enabled=experimental_config.msc3664_enabled
86 push_rules,
87 enabled_map,
88 msc3664_enabled=experimental_config.msc3664_enabled,
89 msc1767_enabled=experimental_config.msc1767_enabled,
8890 )
8991
9092 return filtered_rules
110112 ):
111113 super().__init__(database, db_conn, hs)
112114
113 if hs.config.worker.worker_app is None:
114 self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
115 db_conn, "push_rules_stream", "stream_id"
116 )
117 else:
118 self._push_rules_stream_id_gen = SlavedIdTracker(
119 db_conn, "push_rules_stream", "stream_id"
120 )
115 # In the worker store this is an ID tracker which we overwrite in the non-worker
116 # class below that is used on the main process.
117 self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
118 db_conn,
119 "push_rules_stream",
120 "stream_id",
121 is_writer=hs.config.worker.worker_app is None,
122 )
121123
122124 push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict(
123125 db_conn,
2626 )
2727
2828 from synapse.push import PusherConfig, ThrottleParams
29 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
3029 from synapse.replication.tcp.streams import PushersStream
3130 from synapse.storage._base import SQLBaseStore, db_to_json
3231 from synapse.storage.database import (
5857 ):
5958 super().__init__(database, db_conn, hs)
6059
61 if hs.config.worker.worker_app is None:
62 self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
63 db_conn,
64 "pushers",
65 "id",
66 extra_tables=[("deleted_pushers", "stream_id")],
67 )
68 else:
69 self._pushers_id_gen = SlavedIdTracker(
70 db_conn,
71 "pushers",
72 "id",
73 extra_tables=[("deleted_pushers", "stream_id")],
74 )
60 # In the worker store this is an ID tracker which we overwrite in the non-worker
61 # class below that is used on the main process.
62 self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator(
63 db_conn,
64 "pushers",
65 "id",
66 extra_tables=[("deleted_pushers", "stream_id")],
67 is_writer=hs.config.worker.worker_app is None,
68 )
7569
7670 self.db_pool.updates.register_background_update_handler(
7771 "remove_deactivated_pushers",
330324 async def set_throttle_params(
331325 self, pusher_id: str, room_id: str, params: ThrottleParams
332326 ) -> None:
333 # no need to lock because `pusher_throttle` has a primary key on
334 # (pusher, room_id) so simple_upsert will retry
335327 await self.db_pool.simple_upsert(
336328 "pusher_throttle",
337329 {"pusher": pusher_id, "room_id": room_id},
338330 {"last_sent_ts": params.last_sent_ts, "throttle_ms": params.throttle_ms},
339331 desc="set_throttle_params",
340 lock=False,
341332 )
342333
343334 async def _remove_deactivated_pushers(self, progress: dict, batch_size: int) -> int:
594585 device_id: Optional[str] = None,
595586 ) -> None:
596587 async with self._pushers_id_gen.get_next() as stream_id:
597 # no need to lock because `pushers` has a unique key on
598 # (app_id, pushkey, user_name) so simple_upsert will retry
599588 await self.db_pool.simple_upsert(
600589 table="pushers",
601590 keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id},
614603 "device_id": device_id,
615604 },
616605 desc="add_pusher",
617 lock=False,
618606 )
619607
620608 user_has_pusher = self.get_if_user_has_pusher.cache.get_immediate(
2626 )
2727
2828 from synapse.api.constants import EduTypes
29 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
3029 from synapse.replication.tcp.streams import ReceiptsStream
3130 from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
3231 from synapse.storage.database import (
6059 hs: "HomeServer",
6160 ):
6261 self._instance_name = hs.get_instance_name()
62
63 # In the worker store this is an ID tracker which we overwrite in the non-worker
64 # class below that is used on the main process.
6365 self._receipts_id_gen: AbstractStreamIdTracker
6466
6567 if isinstance(database.engine, PostgresEngine):
8688 # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
8789 # updated over replication. (Multiple writers are not supported for
8890 # SQLite).
89 if hs.get_instance_name() in hs.config.worker.writers.receipts:
90 self._receipts_id_gen = StreamIdGenerator(
91 db_conn, "receipts_linearized", "stream_id"
92 )
93 else:
94 self._receipts_id_gen = SlavedIdTracker(
95 db_conn, "receipts_linearized", "stream_id"
96 )
91 self._receipts_id_gen = StreamIdGenerator(
92 db_conn,
93 "receipts_linearized",
94 "stream_id",
95 is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts,
96 )
9797
9898 super().__init__(database, db_conn, hs)
9999
952952 """Returns user id from threepid
953953
954954 Args:
955 txn (cursor):
955 txn:
956956 medium: threepid medium e.g. email
957957 address: threepid address e.g. me@example.com
958958
12821282 """Sets an expiration date to the account with the given user ID.
12831283
12841284 Args:
1285 user_id (str): User ID to set an expiration date for.
1286 use_delta (bool): If set to False, the expiration date for the user will be
1285 user_id: User ID to set an expiration date for.
1286 use_delta: If set to False, the expiration date for the user will be
12871287 now + validity period. If set to True, this expiration date will be a
12881288 random value in the [now + period - d ; now + period] range, d being a
12891289 delta equal to 10% of the validity period.
1919 FrozenSet,
2020 Iterable,
2121 List,
22 Mapping,
2223 Optional,
2324 Set,
2425 Tuple,
8081 event_id: str
8182 # The sender of the related event.
8283 sender: str
83 topological_ordering: Optional[int]
84 stream_ordering: int
8584
8685
8786 class RelationsWorkerStore(SQLBaseStore):
244243 txn.execute(sql, where_args + [limit + 1])
245244
246245 events = []
247 for event_id, relation_type, sender, topo_ordering, stream_ordering in txn:
246 topo_orderings: List[int] = []
247 stream_orderings: List[int] = []
248 for event_id, relation_type, sender, topo_ordering, stream_ordering in cast(
249 List[Tuple[str, str, str, int, int]], txn
250 ):
248251 # Do not include edits for redacted events as they leak event
249252 # content.
250253 if not is_redacted or relation_type != RelationTypes.REPLACE:
251 events.append(
252 _RelatedEvent(event_id, sender, topo_ordering, stream_ordering)
253 )
254 events.append(_RelatedEvent(event_id, sender))
255 topo_orderings.append(topo_ordering)
256 stream_orderings.append(stream_ordering)
254257
255258 # If there are more events, generate the next pagination key from the
256259 # last event returned.
259262 # Instead of using the last row (which tells us there is more
260263 # data), use the last row to be returned.
261264 events = events[:limit]
262
263 topo = events[-1].topological_ordering
264 token = events[-1].stream_ordering
265 topo_orderings = topo_orderings[:limit]
266 stream_orderings = stream_orderings[:limit]
267
268 topo = topo_orderings[-1]
269 token = stream_orderings[-1]
265270 if direction == "b":
266271 # Tokens are positions between events.
267272 # This token points *after* the last event in the chunk.
393398 )
394399 return result is not None
395400
396 @cached(tree=True)
397 async def get_aggregation_groups_for_event(
398 self, event_id: str, room_id: str, limit: int = 5
399 ) -> List[JsonDict]:
400 """Get a list of annotations on the event, grouped by event type and
401 @cached()
402 async def get_aggregation_groups_for_event(self, event_id: str) -> List[JsonDict]:
403 raise NotImplementedError()
404
405 @cachedList(
406 cached_method_name="get_aggregation_groups_for_event", list_name="event_ids"
407 )
408 async def get_aggregation_groups_for_events(
409 self, event_ids: Collection[str]
410 ) -> Mapping[str, Optional[List[JsonDict]]]:
411 """Get a list of annotations on the given events, grouped by event type and
401412 aggregation key, sorted by count.
402413
403414 This is used e.g. to get what and how many reactions have happened
404415 on an event.
405416
406417 Args:
407 event_id: Fetch events that relate to this event ID.
408 room_id: The room the event belongs to.
409 limit: Only fetch the `limit` groups.
410
411 Returns:
412 List of groups of annotations that match. Each row is a dict with
413 `type`, `key` and `count` fields.
414 """
415
416 args = [
417 event_id,
418 room_id,
419 RelationTypes.ANNOTATION,
420 limit,
421 ]
422
423 sql = """
424 SELECT type, aggregation_key, COUNT(DISTINCT sender)
425 FROM event_relations
426 INNER JOIN events USING (event_id)
427 WHERE relates_to_id = ? AND room_id = ? AND relation_type = ?
428 GROUP BY relation_type, type, aggregation_key
429 ORDER BY COUNT(*) DESC
430 LIMIT ?
431 """
432
433 def _get_aggregation_groups_for_event_txn(
418 event_ids: Fetch events that relate to these event IDs.
419
420 Returns:
421 A map of event IDs to a list of groups of annotations that match.
422 Each entry is a dict with `type`, `key` and `count` fields.
423 """
424 # The number of entries to return per event ID.
425 limit = 5
426
427 clause, args = make_in_list_sql_clause(
428 self.database_engine, "relates_to_id", event_ids
429 )
430 args.append(RelationTypes.ANNOTATION)
431
432 sql = f"""
433 SELECT
434 relates_to_id,
435 annotation.type,
436 aggregation_key,
437 COUNT(DISTINCT annotation.sender)
438 FROM events AS annotation
439 INNER JOIN event_relations USING (event_id)
440 INNER JOIN events AS parent ON
441 parent.event_id = relates_to_id
442 AND parent.room_id = annotation.room_id
443 WHERE
444 {clause}
445 AND relation_type = ?
446 GROUP BY relates_to_id, annotation.type, aggregation_key
447 ORDER BY relates_to_id, COUNT(*) DESC
448 """
449
450 def _get_aggregation_groups_for_events_txn(
434451 txn: LoggingTransaction,
435 ) -> List[JsonDict]:
452 ) -> Mapping[str, List[JsonDict]]:
436453 txn.execute(sql, args)
437454
438 return [{"type": row[0], "key": row[1], "count": row[2]} for row in txn]
455 result: Dict[str, List[JsonDict]] = {}
456 for event_id, type, key, count in cast(
457 List[Tuple[str, str, str, int]], txn
458 ):
459 event_results = result.setdefault(event_id, [])
460
461 # Limit the number of results per event ID.
462 if len(event_results) == limit:
463 continue
464
465 event_results.append({"type": type, "key": key, "count": count})
466
467 return result
439468
440469 return await self.db_pool.runInteraction(
441 "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn
470 "get_aggregation_groups_for_events", _get_aggregation_groups_for_events_txn
442471 )
443472
444473 async def get_aggregation_groups_for_users(
445 self,
446 event_id: str,
447 room_id: str,
448 limit: int,
449 users: FrozenSet[str] = frozenset(),
450 ) -> Dict[Tuple[str, str], int]:
474 self, event_ids: Collection[str], users: FrozenSet[str]
475 ) -> Dict[str, Dict[Tuple[str, str], int]]:
451476 """Fetch the partial aggregations for an event for specific users.
452477
453478 This is used, in conjunction with get_aggregation_groups_for_event, to
454479 remove information from the results for ignored users.
455480
456481 Args:
457 event_id: Fetch events that relate to this event ID.
458 room_id: The room the event belongs to.
459 limit: Only fetch the `limit` groups.
482 event_ids: Fetch events that relate to these event IDs.
460483 users: The users to fetch information for.
461484
462485 Returns:
463 A map of (event type, aggregation key) to a count of users.
486 A map of event ID to a map of (event type, aggregation key) to a
487 count of users.
464488 """
465489
466490 if not users:
467491 return {}
468492
469 args: List[Union[str, int]] = [
470 event_id,
471 room_id,
472 RelationTypes.ANNOTATION,
473 ]
493 events_sql, args = make_in_list_sql_clause(
494 self.database_engine, "relates_to_id", event_ids
495 )
474496
475497 users_sql, users_args = make_in_list_sql_clause(
476 self.database_engine, "sender", users
498 self.database_engine, "annotation.sender", users
477499 )
478500 args.extend(users_args)
501 args.append(RelationTypes.ANNOTATION)
479502
480503 sql = f"""
481 SELECT type, aggregation_key, COUNT(DISTINCT sender)
482 FROM event_relations
483 INNER JOIN events USING (event_id)
484 WHERE relates_to_id = ? AND room_id = ? AND relation_type = ? AND {users_sql}
485 GROUP BY relation_type, type, aggregation_key
486 ORDER BY COUNT(*) DESC
487 LIMIT ?
504 SELECT
505 relates_to_id,
506 annotation.type,
507 aggregation_key,
508 COUNT(DISTINCT annotation.sender)
509 FROM events AS annotation
510 INNER JOIN event_relations USING (event_id)
511 INNER JOIN events AS parent ON
512 parent.event_id = relates_to_id
513 AND parent.room_id = annotation.room_id
514 WHERE {events_sql} AND {users_sql} AND relation_type = ?
515 GROUP BY relates_to_id, annotation.type, aggregation_key
516 ORDER BY relates_to_id, COUNT(*) DESC
488517 """
489518
490519 def _get_aggregation_groups_for_users_txn(
491520 txn: LoggingTransaction,
492 ) -> Dict[Tuple[str, str], int]:
493 txn.execute(sql, args + [limit])
494
495 return {(row[0], row[1]): row[2] for row in txn}
521 ) -> Dict[str, Dict[Tuple[str, str], int]]:
522 txn.execute(sql, args)
523
524 result: Dict[str, Dict[Tuple[str, str], int]] = {}
525 for event_id, type, key, count in cast(
526 List[Tuple[str, str, str, int]], txn
527 ):
528 result.setdefault(event_id, {})[(type, key)] = count
529
530 return result
496531
497532 return await self.db_pool.runInteraction(
498533 "get_aggregation_groups_for_users", _get_aggregation_groups_for_users_txn
534 )
535
536 @cached()
537 async def get_references_for_event(self, event_id: str) -> List[JsonDict]:
538 raise NotImplementedError()
539
540 @cachedList(cached_method_name="get_references_for_event", list_name="event_ids")
541 async def get_references_for_events(
542 self, event_ids: Collection[str]
543 ) -> Mapping[str, Optional[List[_RelatedEvent]]]:
544 """Get a list of references to the given events.
545
546 Args:
547 event_ids: Fetch events that relate to these event IDs.
548
549 Returns:
550 A map of event IDs to a list of related event IDs (and their senders).
551 """
552
553 clause, args = make_in_list_sql_clause(
554 self.database_engine, "relates_to_id", event_ids
555 )
556 args.append(RelationTypes.REFERENCE)
557
558 sql = f"""
559 SELECT relates_to_id, ref.event_id, ref.sender
560 FROM events AS ref
561 INNER JOIN event_relations USING (event_id)
562 INNER JOIN events AS parent ON
563 parent.event_id = relates_to_id
564 AND parent.room_id = ref.room_id
565 WHERE
566 {clause}
567 AND relation_type = ?
568 ORDER BY ref.topological_ordering, ref.stream_ordering
569 """
570
571 def _get_references_for_events_txn(
572 txn: LoggingTransaction,
573 ) -> Mapping[str, List[_RelatedEvent]]:
574 txn.execute(sql, args)
575
576 result: Dict[str, List[_RelatedEvent]] = {}
577 for relates_to_id, event_id, sender in cast(
578 List[Tuple[str, str, str]], txn
579 ):
580 result.setdefault(relates_to_id, []).append(
581 _RelatedEvent(event_id, sender)
582 )
583
584 return result
585
586 return await self.db_pool.runInteraction(
587 "_get_references_for_events_txn", _get_references_for_events_txn
499588 )
500589
501590 @cached()
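
To make the shape of the batched lookup above concrete, here is a minimal illustration of the value `get_aggregation_groups_for_events` returns; the event IDs, reaction keys and counts are invented for the example and are not taken from the codebase.

# Illustrative only: the per-event map returned by
# get_aggregation_groups_for_events, capped at 5 entries per event ID.
example_result = {
    "$event1": [
        {"type": "m.reaction", "key": "👍", "count": 3},
        {"type": "m.reaction", "key": "🎉", "count": 1},
    ],
    "$event2": [
        {"type": "m.reaction", "key": "👍", "count": 2},
    ],
}
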
911911 event_json = db_to_json(content_json)
912912 content = event_json["content"]
913913 content_url = content.get("url")
914 thumbnail_url = content.get("info", {}).get("thumbnail_url")
914 info = content.get("info")
915 if isinstance(info, dict):
916 thumbnail_url = info.get("thumbnail_url")
917 else:
918 thumbnail_url = None
915919
916920 for url in (content_url, thumbnail_url):
917921 if not url:
18421846 "creator": room_creator,
18431847 "has_auth_chain_index": has_auth_chain_index,
18441848 },
1845 # rooms has a unique constraint on room_id, so no need to lock when doing an
1846 # emulated upsert.
1847 lock=False,
18481849 )
18491850
18501851 async def store_partial_state_room(
19651966 "creator": "",
19661967 "has_auth_chain_index": has_auth_chain_index,
19671968 },
1968 # rooms has a unique constraint on room_id, so no need to lock when doing an
1969 # emulated upsert.
1970 lock=False,
19711969 )
19721970
19731971 async def set_room_is_public(self, room_id: str, is_public: bool) -> None:
20562054 Args:
20572055 report_id: ID of reported event in database
20582056 Returns:
2059 event_report: json list of information from event report
2057 JSON dict of information from an event report or None if the
2058 report does not exist.
20602059 """
20612060
20622061 def _get_event_report_txn(
21292128 user_id: search for user_id. Ignored if user_id is None
21302129 room_id: search for room_id. Ignored if room_id is None
21312130 Returns:
2132 event_reports: json list of event reports
2133 count: total number of event reports matching the filter criteria
2131 Tuple of:
2132 json list of event reports
2133 total number of event reports matching the filter criteria
21342134 """
21352135
21362136 def _get_event_reports_paginate_txn(
4343 table="event_to_state_groups",
4444 keyvalues={"event_id": event_id},
4545 values={"state_group": state_group_id, "event_id": event_id},
46 # Unique constraint on event_id so we don't have to lock
47 lock=False,
4846 )
184184 - who should be in the user_directory.
185185
186186 Args:
187 progress (dict)
188 batch_size (int): Maximum number of state events to process
189 per cycle.
187 progress
188 batch_size: Maximum number of state events to process per cycle.
190189
191190 Returns:
192191 number of events processed.
481480 table="user_directory",
482481 keyvalues={"user_id": user_id},
483482 values={"display_name": display_name, "avatar_url": avatar_url},
484 lock=False, # We're only inserter
485483 )
486484
487485 if isinstance(self.database_engine, PostgresEngine):
511509 table="user_directory_search",
512510 keyvalues={"user_id": user_id},
513511 values={"value": value},
514 lock=False, # We're only inserter
515512 )
516513 else:
517514 # This should be unreachable.
707704 Returns the rooms that a user is in.
708705
709706 Args:
710 user_id(str): Must be a local user
707 user_id: Must be a local user
711708
712709 Returns:
713 list: user_id
710 List of room IDs
714711 """
715712 rows = await self.db_pool.simple_select_onecol(
716713 table="users_who_share_private_rooms",
9292
9393 results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups}
9494
95 where_clause, where_args = state_filter.make_sql_filter_clause()
96
97 # Unless the filter clause is empty, we're going to append it after an
98 # existing where clause
99 if where_clause:
100 where_clause = " AND (%s)" % (where_clause,)
101
10295 if isinstance(self.database_engine, PostgresEngine):
10396 # Temporarily disable sequential scans in this transaction. This is
10497 # a temporary hack until we can add the right indices in
109102 # against `state_groups_state` to fetch the latest state.
110103 # It assumes that previous state groups are always numerically
111104 # lesser.
112 # The PARTITION is used to get the event_id in the greatest state
113 # group for the given type, state_key.
114105 # This may return multiple rows per (type, state_key), but last_value
115106 # should be the same.
116107 sql = """
117 WITH RECURSIVE state(state_group) AS (
108 WITH RECURSIVE sgs(state_group) AS (
118109 VALUES(?::bigint)
119110 UNION ALL
120 SELECT prev_state_group FROM state_group_edges e, state s
111 SELECT prev_state_group FROM state_group_edges e, sgs s
121112 WHERE s.state_group = e.state_group
122113 )
123 SELECT DISTINCT ON (type, state_key)
124 type, state_key, event_id
125 FROM state_groups_state
126 WHERE state_group IN (
127 SELECT state_group FROM state
128 ) %s
129 ORDER BY type, state_key, state_group DESC
114 %s
130115 """
116
117 overall_select_query_args: List[Union[int, str]] = []
118
119 # This is an optimization to create a select clause per-condition. This
120 # makes the query planner a lot smarter about which rows it should pull out
121 # in the first place, and we end up with something that takes 10x less time
122 # to get a result.
123 use_condition_optimization = (
124 not state_filter.include_others and not state_filter.is_full()
125 )
126 state_filter_condition_combos: List[Tuple[str, Optional[str]]] = []
127 # We don't need to calculate this list if we're not using the condition
128 # optimization
129 if use_condition_optimization:
130 for etype, state_keys in state_filter.types.items():
131 if state_keys is None:
132 state_filter_condition_combos.append((etype, None))
133 else:
134 for state_key in state_keys:
135 state_filter_condition_combos.append((etype, state_key))
136 # And here is the optimization itself. We don't want to do the optimization
137 # if there are too many individual conditions. 10 is an arbitrary number
138 # with no testing behind it but we do know that we specifically made this
139 # optimization for when we grab the necessary state out for
140 # `filter_events_for_client` which just uses 2 conditions
141 # (`EventTypes.RoomHistoryVisibility` and `EventTypes.Member`).
142 if use_condition_optimization and len(state_filter_condition_combos) < 10:
143 select_clause_list: List[str] = []
144 for etype, skey in state_filter_condition_combos:
145 if skey is None:
146 where_clause = "(type = ?)"
147 overall_select_query_args.extend([etype])
148 else:
149 where_clause = "(type = ? AND state_key = ?)"
150 overall_select_query_args.extend([etype, skey])
151
152 select_clause_list.append(
153 f"""
154 (
155 SELECT DISTINCT ON (type, state_key)
156 type, state_key, event_id
157 FROM state_groups_state
158 INNER JOIN sgs USING (state_group)
159 WHERE {where_clause}
160 ORDER BY type, state_key, state_group DESC
161 )
162 """
163 )
164
165 overall_select_clause = " UNION ".join(select_clause_list)
166 else:
167 where_clause, where_args = state_filter.make_sql_filter_clause()
168 # Unless the filter clause is empty, we're going to append it after an
169 # existing where clause
170 if where_clause:
171 where_clause = " AND (%s)" % (where_clause,)
172
173 overall_select_query_args.extend(where_args)
174
175 overall_select_clause = f"""
176 SELECT DISTINCT ON (type, state_key)
177 type, state_key, event_id
178 FROM state_groups_state
179 WHERE state_group IN (
180 SELECT state_group FROM sgs
181 ) {where_clause}
182 ORDER BY type, state_key, state_group DESC
183 """
131184
132185 for group in groups:
133186 args: List[Union[int, str]] = [group]
134 args.extend(where_args)
135
136 txn.execute(sql % (where_clause,), args)
187 args.extend(overall_select_query_args)
188
189 txn.execute(sql % (overall_select_clause,), args)
137190 for row in txn:
138191 typ, state_key, event_id = row
139192 key = (intern_string(typ), intern_string(state_key))
140193 results[group][key] = event_id
141194 else:
142195 max_entries_returned = state_filter.max_entries_returned()
196
197 where_clause, where_args = state_filter.make_sql_filter_clause()
198 # Unless the filter clause is empty, we're going to append it after an
199 # existing where clause
200 if where_clause:
201 where_clause = " AND (%s)" % (where_clause,)
143202
144203 # We don't use WITH RECURSIVE on sqlite3 as there are distributions
145204 # that ship with an sqlite3 version that doesn't support it (e.g. wheezy)
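
As a rough illustration of the per-condition optimisation added above, this is approximately the Postgres query it builds for two filter conditions; the condition values are invented for the sketch, and the final statement is produced by substituting the UNION of per-condition selects into the recursive CTE.

# Sketch only: the generated query for the two conditions
# ("m.room.history_visibility", "") and ("m.room.member", "@alice:hs").
example_sql = """
WITH RECURSIVE sgs(state_group) AS (
    VALUES(?::bigint)
    UNION ALL
    SELECT prev_state_group FROM state_group_edges e, sgs s
    WHERE s.state_group = e.state_group
)
(
    SELECT DISTINCT ON (type, state_key) type, state_key, event_id
    FROM state_groups_state
    INNER JOIN sgs USING (state_group)
    WHERE (type = ? AND state_key = ?)
    ORDER BY type, state_key, state_group DESC
)
UNION
(
    SELECT DISTINCT ON (type, state_key) type, state_key, event_id
    FROM state_groups_state
    INNER JOIN sgs USING (state_group)
    WHERE (type = ? AND state_key = ?)
    ORDER BY type, state_key, state_group DESC
)
"""
# Bound args: [state_group, "m.room.history_visibility", "",
#              "m.room.member", "@alice:hs"]
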
0 /* Copyright 2022 The Matrix.org Foundation C.I.C
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 -- Prior to this schema delta, we tracked the set of unconverted rows in
16 -- `device_lists_changes_in_room` using the `converted_to_destinations` flag. When rows
17 -- were converted to `device_lists_outbound_pokes`, the `converted_to_destinations` flag
18 -- would be set.
19 --
20 -- After this schema delta, the `converted_to_destinations` flag is still populated like
21 -- before, but the set of unconverted rows is determined by the `stream_id` in the new
22 -- `device_lists_changes_converted_stream_position` table.
23 --
24 -- If rolled back, Synapse will re-send all device list changes that happened since the
25 -- schema delta.
26
27 CREATE TABLE IF NOT EXISTS device_lists_changes_converted_stream_position(
28 Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row.
29 -- The (stream id, room id) of the last row in `device_lists_changes_in_room` that
30 -- has been converted to `device_lists_outbound_pokes`. Rows with a strictly larger
31 -- (stream id, room id) where `converted_to_destinations` is `FALSE` have not been
32 -- converted.
33 stream_id BIGINT NOT NULL,
34 -- `room_id` may be an empty string, which compares less than all valid room IDs.
35 room_id TEXT NOT NULL,
36 CHECK (Lock='X')
37 );
38
39 INSERT INTO device_lists_changes_converted_stream_position (stream_id, room_id) VALUES (
40 (
41 SELECT COALESCE(
42 -- The last converted stream id is the smallest unconverted stream id minus
43 -- one.
44 MIN(stream_id) - 1,
45 -- If there is no unconverted stream id, the last converted stream id is the
46 -- largest stream id.
47 -- Otherwise, pick 1, since stream ids start at 2.
48 (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_changes_in_room)
49 ) FROM device_lists_changes_in_room WHERE NOT converted_to_destinations
50 ),
51 ''
52 );
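
To spell out the seeding logic in the INSERT above, here is a small, self-contained Python sketch of the same COALESCE rules; the function name and the stream id values are invented for the example.

# Mirrors the COALESCE logic above, with made-up stream ids.
def last_converted_stream_id(unconverted_ids, all_ids):
    if unconverted_ids:
        # The last converted id is the smallest unconverted id minus one.
        return min(unconverted_ids) - 1
    if all_ids:
        # No unconverted rows: the last converted id is the largest id.
        return max(all_ids)
    # Empty table: pick 1, since stream ids start at 2.
    return 1

assert last_converted_stream_id({5, 7}, {2, 3, 5, 7}) == 4
assert last_converted_stream_id(set(), {2, 3, 5, 7}) == 7
assert last_converted_stream_id(set(), set()) == 1
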
0 /* Copyright 2022 The Matrix.org Foundation C.I.C
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15
16 -- Adds an index on `device_lists_changes_in_room (room_id, stream_id)`, which
17 -- speeds up `/sync` queries.
18 INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
19 (7313, 'device_lists_changes_in_room_by_room_index', '{}');
185185 column: str,
186186 extra_tables: Iterable[Tuple[str, str]] = (),
187187 step: int = 1,
188 is_writer: bool = True,
188189 ) -> None:
189190 assert step != 0
190191 self._lock = threading.Lock()
191192 self._step: int = step
192193 self._current: int = _load_current_id(db_conn, table, column, step)
194 self._is_writer = is_writer
193195 for table, column in extra_tables:
194196 self._current = (max if step > 0 else min)(
195197 self._current, _load_current_id(db_conn, table, column, step)
203205 self._unfinished_ids: OrderedDict[int, int] = OrderedDict()
204206
205207 def advance(self, instance_name: str, new_id: int) -> None:
206 # `StreamIdGenerator` should only be used when there is a single writer,
207 # so replication should never happen.
208 raise Exception("Replication is not supported by StreamIdGenerator")
208 # Advance should never be called on a writer instance, only over replication
209 if self._is_writer:
210 raise Exception("Replication is not supported by writer StreamIdGenerator")
211
212 self._current = (max if self._step > 0 else min)(self._current, new_id)
209213
210214 def get_next(self) -> AsyncContextManager[int]:
211215 with self._lock:
248252 return _AsyncCtxManagerWrapper(manager())
249253
250254 def get_current_token(self) -> int:
255 if not self._is_writer:
256 return self._current
257
251258 with self._lock:
252259 if self._unfinished_ids:
253260 return next(iter(self._unfinished_ids)) - self._step
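
A minimal, self-contained sketch (not Synapse code) of the writer/non-writer behaviour the `is_writer` flag introduces above: a writer mints IDs locally and rejects replication updates, while a non-writer only follows the stream via `advance`. The class and names here are invented for the illustration.

import threading

class SketchIdTracker:
    def __init__(self, current: int, is_writer: bool) -> None:
        self._lock = threading.Lock()
        self._current = current
        self._is_writer = is_writer

    def advance(self, new_id: int) -> None:
        # Only non-writers are advanced over replication.
        if self._is_writer:
            raise Exception("Replication is not supported by a writer")
        with self._lock:
            self._current = max(self._current, new_id)

    def get_current_token(self) -> int:
        return self._current

# The main process would construct the tracker with is_writer=True;
# workers use is_writer=False and advance() it from replication rows.
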
7373 return token
7474
7575 @trace
76 async def get_start_token_for_pagination(self, room_id: str) -> StreamToken:
77 """Get the start token for a given room to be used to paginate
78 events.
79
80 The returned token does not have the current values for fields other
81 than `room`, since they are not used during pagination.
82
83 Returns:
84 The start token for pagination.
85 """
86 return StreamToken.START
87
88 @trace
7689 async def get_current_token_for_pagination(self, room_id: str) -> StreamToken:
7790 """Get the current token for a given room to be used to paginate
7891 events.
142142 Requester.
143143
144144 Args:
145 store (DataStore): Used to convert AS ID to AS object
146 input (dict): A dict produced by `serialize`
145 store: Used to convert AS ID to AS object
146 input: A dict produced by `serialize`
147147
148148 Returns:
149149 Requester
216216 limit: Maximum number of concurrent executions.
217217
218218 Returns:
219 Deferred: Resolved when all function invocations have finished.
219 None, when all function invocations have finished. The return values
220 from those functions are discarded.
220221 """
221222 it = iter(args)
222223
196196 resize_callback: A function which can be called to resize the cache.
197197
198198 Returns:
199 CacheMetric: an object which provides inc_{hits,misses,evictions} methods
199 an object which provides inc_{hits,misses,evictions} methods
200200 """
201201 if resizable:
202202 if not resize_callback:
152152 Args:
153153 key:
154154 callback: Gets called when the entry in the cache is invalidated
155 update_metrics (bool): whether to update the cache hit rate metrics
155 update_metrics: whether to update the cache hit rate metrics
156156
157157 Returns:
158158 A Deferred which completes with the result. Note that this may later fail
502502 is specified as a list that is iterated through to lookup keys in the
503503 original cache. A new tuple consisting of the (deduplicated) keys that weren't in
504504 the cache gets passed to the original function, which is expected to results
505 in a map of key to value for each passed value. THe new results are stored in the
505 in a map of key to value for each passed value. The new results are stored in the
506506 original cache. Note that any missing values are cached as None.
507507
508508 Args:
168168 if it is in the cache.
169169
170170 Returns:
171 DictionaryEntry: If `dict_keys` is not None then `DictionaryEntry`
172 will contain include the keys that are in the cache. If None then
173 will either return the full dict if in the cache, or the empty
174 dict (with `full` set to False) if it isn't.
171 If `dict_keys` is not None then `DictionaryEntry` will contain
172 the keys that are in the cache.
173
174 If None then will either return the full dict if in the cache, or the
175 empty dict (with `full` set to False) if it isn't.
175176 """
176177 if dict_keys is None:
177178 # The caller wants the full set of dictionary keys for this cache key
206206 items from the cache.
207207
208208 Returns:
209 bool: Whether the cache changed size or not.
209 Whether the cache changed size or not.
210210 """
211211 new_size = int(self._original_max_size * factor)
212212 if new_size != self._max_size:
388388 cache_name: The name of this cache, for the prometheus metrics. If unset,
389389 no metrics will be reported on this cache.
390390
391 cache_type (type):
391 cache_type:
392392 type of underlying cache to be used. Typically one of dict
393393 or TreeCache.
394394
395 size_callback (func(V) -> int | None):
395 size_callback:
396396
397397 metrics_collection_callback:
398398 metrics collection callback. This is called early in the metrics
402402
403403 Ignored if cache_name is None.
404404
405 apply_cache_factor_from_config (bool): If true, `max_size` will be
405 apply_cache_factor_from_config: If true, `max_size` will be
406406 multiplied by a cache factor derived from the homeserver config
407407
408408 clock:
795795 items from the cache.
796796
797797 Returns:
798 bool: Whether the cache changed size or not.
798 Whether the cache changed size or not.
799799 """
800800 if not self.apply_cache_factor_from_config:
801801 return False
182182 # Handle request ...
183183
184184 Args:
185 host (str): Origin of incoming request.
185 host: Origin of incoming request.
186186
187187 Returns:
188188 context manager which returns a deferred.
4747 registration: whether we want to bind the 3PID as part of registering a new user.
4848
4949 Returns:
50 bool: whether the 3PID medium/address is allowed to be added to this HS
50 whether the 3PID medium/address is allowed to be added to this HS
5151 """
5252 if not await hs.get_password_auth_provider().is_3pid_allowed(
5353 medium, address, registration
8989 """Fetch any objects that have timed out
9090
9191 Args:
92 now (ms): Current time in msec
92 now: Current time in msec
9393
9494 Returns:
95 list: List of objects that have timed out
95 List of objects that have timed out
9696 """
9797 now_key = int(now / self.bucket_size)
9898
562562
563563 async def filter_events_for_server(
564564 storage: StorageControllers,
565 server_name: str,
565 target_server_name: str,
566 local_server_name: str,
566567 events: List[EventBase],
567568 redact: bool = True,
568569 check_history_visibility_only: bool = False,
602603 # if the server is either in the room or has been invited
603604 # into the room.
604605 for ev in memberships.values():
605 assert get_domain_from_id(ev.state_key) == server_name
606 assert get_domain_from_id(ev.state_key) == target_server_name
606607
607608 memtype = ev.membership
608609 if memtype == Membership.JOIN:
620621 # We don't want to check whether users are erased, which is equivalent
621622 # to no users having been erased.
622623 erased_senders = {}
624
625 # Filter out non-local events when we are in the middle of a partial join, since our servers
626 # list can be out of date and we could leak events to servers not in the room anymore.
627 # This can also be true for local events but we consider it to be an acceptable risk.
628
629 # We do this check as a first step and before retrieving membership events because
630 # otherwise a room could be fully joined after we retrieve those, which would then bypass
631 # this check but would base the filtering on an outdated view of the membership events.
632
633 partial_state_invisible_events = set()
634 if not check_history_visibility_only:
635 for e in events:
636 sender_domain = get_domain_from_id(e.sender)
637 if (
638 sender_domain != local_server_name
639 and await storage.main.is_partial_state_room(e.room_id)
640 ):
641 partial_state_invisible_events.add(e)
623642
624643 # Let's check to see if all the events have a history visibility
625644 # of "shared" or "world_readable". If that's the case then we don't
635654 if event_to_history_vis[e.event_id]
636655 not in (HistoryVisibility.SHARED, HistoryVisibility.WORLD_READABLE)
637656 ],
638 server_name,
657 target_server_name,
639658 )
640659
641660 to_return = []
644663 visible = check_event_is_visible(
645664 event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {})
646665 )
666
667 if e in partial_state_invisible_events:
668 visible = False
669
647670 if visible and not erased:
648671 to_return.append(e)
649672 elif redact:
6868 events=events,
6969 ephemeral=[],
7070 to_device_messages=[], # txn made and saved
71 one_time_key_counts={},
71 one_time_keys_count={},
7272 unused_fallback_keys={},
7373 device_list_summary=DeviceListUpdates(),
7474 )
9595 events=events,
9696 ephemeral=[],
9797 to_device_messages=[], # txn made and saved
98 one_time_key_counts={},
98 one_time_keys_count={},
9999 unused_fallback_keys={},
100100 device_list_summary=DeviceListUpdates(),
101101 )
124124 events=events,
125125 ephemeral=[],
126126 to_device_messages=[],
127 one_time_key_counts={},
127 one_time_keys_count={},
128128 unused_fallback_keys={},
129129 device_list_summary=DeviceListUpdates(),
130130 )
468468 keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0))
469469 self.assertEqual(keys, {})
470470
471 def test_keyid_containing_forward_slash(self) -> None:
472 """We should url-encode any url unsafe chars in key ids.
473
474 Detects https://github.com/matrix-org/synapse/issues/14488.
475 """
476 fetcher = ServerKeyFetcher(self.hs)
477 self.get_success(fetcher.get_keys("example.com", ["key/potato"], 0))
478
479 self.http_client.get_json.assert_called_once()
480 args, kwargs = self.http_client.get_json.call_args
481 self.assertEqual(kwargs["path"], "/_matrix/key/v2/server/key%2Fpotato")
482
471483
472484 class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase):
473485 def make_homeserver(self, reactor, clock):
8383 )
8484
8585 @override_config({"send_federation": True})
86 def test_send_receipts_thread(self):
87 mock_send_transaction = (
88 self.hs.get_federation_transport_client().send_transaction
89 )
90 mock_send_transaction.return_value = make_awaitable({})
91
92 # Create receipts for:
93 #
94 # * The same room / user on multiple threads.
95 # * A different user in the same room.
96 sender = self.hs.get_federation_sender()
97 for user, thread in (
98 ("alice", None),
99 ("alice", "thread"),
100 ("bob", None),
101 ("bob", "diff-thread"),
102 ):
103 receipt = ReadReceipt(
104 "room_id",
105 "m.read",
106 user,
107 ["event_id"],
108 thread_id=thread,
109 data={"ts": 1234},
110 )
111 self.successResultOf(
112 defer.ensureDeferred(sender.send_read_receipt(receipt))
113 )
114
115 self.pump()
116
117 # expect a call to send_transaction with two EDUs to separate threads.
118 mock_send_transaction.assert_called_once()
119 json_cb = mock_send_transaction.call_args[0][1]
120 data = json_cb()
121 # Note that the ordering of the EDUs doesn't matter.
122 self.assertCountEqual(
123 data["edus"],
124 [
125 {
126 "edu_type": EduTypes.RECEIPT,
127 "content": {
128 "room_id": {
129 "m.read": {
130 "alice": {
131 "event_ids": ["event_id"],
132 "data": {"ts": 1234, "thread_id": "thread"},
133 },
134 "bob": {
135 "event_ids": ["event_id"],
136 "data": {"ts": 1234, "thread_id": "diff-thread"},
137 },
138 }
139 }
140 },
141 },
142 {
143 "edu_type": EduTypes.RECEIPT,
144 "content": {
145 "room_id": {
146 "m.read": {
147 "alice": {
148 "event_ids": ["event_id"],
149 "data": {"ts": 1234},
150 },
151 "bob": {
152 "event_ids": ["event_id"],
153 "data": {"ts": 1234},
154 },
155 }
156 }
157 },
158 },
159 ],
160 )
161
162 @override_config({"send_federation": True})
86163 def test_send_receipts_with_backoff(self):
87164 """Send two receipts in quick succession; the second should be flushed, but
88165 only after 20ms"""
2424 from synapse.api.constants import EduTypes, EventTypes
2525 from synapse.appservice import (
2626 ApplicationService,
27 TransactionOneTimeKeyCounts,
27 TransactionOneTimeKeysCount,
2828 TransactionUnusedFallbackKeys,
2929 )
3030 from synapse.handlers.appservice import ApplicationServicesHandler
11221122 # Capture what was sent as an AS transaction.
11231123 self.send_mock.assert_called()
11241124 last_args, _last_kwargs = self.send_mock.call_args
1125 otks: Optional[TransactionOneTimeKeyCounts] = last_args[self.ARG_OTK_COUNTS]
1125 otks: Optional[TransactionOneTimeKeysCount] = last_args[self.ARG_OTK_COUNTS]
11261126 unused_fallbacks: Optional[TransactionUnusedFallbackKeys] = last_args[
11271127 self.ARG_FALLBACK_KEYS
11281128 ]
1818 from twisted.test.proto_helpers import MemoryReactor
1919
2020 from synapse.api.errors import NotFoundError, SynapseError
21 from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN
21 from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceHandler
2222 from synapse.server import HomeServer
2323 from synapse.util import Clock
2424
3131 class DeviceTestCase(unittest.HomeserverTestCase):
3232 def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
3333 hs = self.setup_test_homeserver("server", federation_http_client=None)
34 self.handler = hs.get_device_handler()
34 handler = hs.get_device_handler()
35 assert isinstance(handler, DeviceHandler)
36 self.handler = handler
3537 self.store = hs.get_datastores().main
3638 return hs
3739
6062 self.assertEqual(res, "fco")
6163
6264 dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco"))
65 assert dev is not None
6366 self.assertEqual(dev["display_name"], "display name")
6467
6568 def test_device_is_preserved_if_exists(self) -> None:
8285 self.assertEqual(res2, "fco")
8386
8487 dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco"))
88 assert dev is not None
8589 self.assertEqual(dev["display_name"], "display name")
8690
8791 def test_device_id_is_made_up_if_unspecified(self) -> None:
9498 )
9599
96100 dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id))
101 assert dev is not None
97102 self.assertEqual(dev["display_name"], "display")
98103
99104 def test_get_devices_by_user(self) -> None:
263268 class DehydrationTestCase(unittest.HomeserverTestCase):
264269 def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
265270 hs = self.setup_test_homeserver("server", federation_http_client=None)
266 self.handler = hs.get_device_handler()
271 handler = hs.get_device_handler()
272 assert isinstance(handler, DeviceHandler)
273 self.handler = handler
267274 self.registration = hs.get_registration_handler()
268275 self.auth = hs.get_auth()
269276 self.store = hs.get_datastores().main
283290 )
284291 )
285292
286 retrieved_device_id, device_data = self.get_success(
287 self.handler.get_dehydrated_device(user_id=user_id)
288 )
293 result = self.get_success(self.handler.get_dehydrated_device(user_id=user_id))
294 assert result is not None
295 retrieved_device_id, device_data = result
289296
290297 self.assertEqual(retrieved_device_id, stored_dehydrated_device_id)
291298 self.assertEqual(device_data, {"device_data": {"foo": "bar"}})
1414 from typing import Optional
1515 from unittest.mock import Mock, call
1616
17 from parameterized import parameterized
1718 from signedjson.key import generate_signing_key
1819
1920 from synapse.api.constants import EventTypes, Membership, PresenceState
3637 from synapse.types import UserID, get_domain_from_id
3738
3839 from tests import unittest
40 from tests.replication._base import BaseMultiWorkerStreamTestCase
3941
4042
4143 class PresenceUpdateTestCase(unittest.HomeserverTestCase):
504506 self.assertEqual(state, new_state)
505507
506508
507 class PresenceHandlerTestCase(unittest.HomeserverTestCase):
509 class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase):
508510 def prepare(self, reactor, clock, hs):
509511 self.presence_handler = hs.get_presence_handler()
510512 self.clock = hs.get_clock()
715717 # our status message should be the same as it was before
716718 self.assertEqual(state.status_msg, status_msg)
717719
718 def test_set_presence_from_syncing_keeps_busy(self):
719 """Test that presence set by syncing doesn't affect busy status"""
720 # while this isn't the default
721 self.presence_handler._busy_presence_enabled = True
722
720 @parameterized.expand([(False,), (True,)])
721 @unittest.override_config(
722 {
723 "experimental_features": {
724 "msc3026_enabled": True,
725 },
726 }
727 )
728 def test_set_presence_from_syncing_keeps_busy(self, test_with_workers: bool):
729 """Test that presence set by syncing doesn't affect busy status
730
731 Args:
732 test_with_workers: If True, check the presence state of the user by calling
733 /sync against a worker, rather than the main process.
734 """
723735 user_id = "@test:server"
724736 status_msg = "I'm busy!"
725737
738 # By default, we call /sync against the main process.
739 worker_to_sync_against = self.hs
740 if test_with_workers:
741 # Create a worker and use it to handle /sync traffic instead.
742 # This is used to test that presence changes get replicated from workers
743 # to the main process correctly.
744 worker_to_sync_against = self.make_worker_hs(
745 "synapse.app.generic_worker", {"worker_name": "presence_writer"}
746 )
747
748 # Set presence to BUSY
726749 self._set_presencestate_with_status_msg(user_id, PresenceState.BUSY, status_msg)
727750
751 # Perform a sync with a presence state other than busy. This should NOT change
752 # our presence status; we only change from busy if we explicitly set it via
753 # /presence/*.
728754 self.get_success(
729 self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE)
730 )
731
755 worker_to_sync_against.get_presence_handler().user_syncing(
756 user_id, True, PresenceState.ONLINE
757 )
758 )
759
760 # Check against the main process that the user's presence did not change.
732761 state = self.get_success(
733762 self.presence_handler.get_state(UserID.from_string(user_id))
734763 )
0 # Licensed under the Apache License, Version 2.0 (the "License");
1 # you may not use this file except in compliance with the License.
2 # You may obtain a copy of the License at
3 #
4 # http://www.apache.org/licenses/LICENSE-2.0
5 #
6 # Unless required by applicable law or agreed to in writing, software
7 # distributed under the License is distributed on an "AS IS" BASIS,
8 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
9 # See the License for the specific language governing permissions and
10 # limitations under the License.
11 from http import HTTPStatus
12 from typing import BinaryIO, Callable, Dict, List, Optional, Tuple
13 from unittest.mock import Mock
14
15 from twisted.test.proto_helpers import MemoryReactor
16 from twisted.web.http_headers import Headers
17
18 from synapse.api.errors import Codes, SynapseError
19 from synapse.http.client import RawHeaders
20 from synapse.server import HomeServer
21 from synapse.util import Clock
22
23 from tests import unittest
24 from tests.test_utils import SMALL_PNG, FakeResponse
25
26
27 class TestSSOHandler(unittest.HomeserverTestCase):
28 def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer:
29 self.http_client = Mock(spec=["get_file"])
30 self.http_client.get_file.side_effect = mock_get_file
31 self.http_client.user_agent = b"Synapse Test"
32 hs = self.setup_test_homeserver(
33 proxied_blacklisted_http_client=self.http_client
34 )
35 return hs
36
37 async def test_set_avatar(self) -> None:
38 """Tests successfully setting the avatar of a newly created user"""
39 handler = self.hs.get_sso_handler()
40
41 # Create a new user to set avatar for
42 reg_handler = self.hs.get_registration_handler()
43 user_id = self.get_success(reg_handler.register_user(approved=True))
44
45 self.assertTrue(
46 self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
47 )
48
49 # Ensure avatar is set on this newly created user,
50 # so no need to compare for the exact image
51 profile_handler = self.hs.get_profile_handler()
52 profile = self.get_success(profile_handler.get_profile(user_id))
53 self.assertIsNot(profile["avatar_url"], None)
54
55 @unittest.override_config({"max_avatar_size": 1})
56 async def test_set_avatar_too_big_image(self) -> None:
57 """Tests that saving an avatar fails when it is too big"""
58 handler = self.hs.get_sso_handler()
59
60 # any random user works since image check is supposed to fail
61 user_id = "@sso-user:test"
62
63 self.assertFalse(
64 self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
65 )
66
67 @unittest.override_config({"allowed_avatar_mimetypes": ["image/jpeg"]})
68 async def test_set_avatar_incorrect_mime_type(self) -> None:
69 """Tests that saving an avatar fails when its mime type is not allowed"""
70 handler = self.hs.get_sso_handler()
71
72 # any random user works since image check is supposed to fail
73 user_id = "@sso-user:test"
74
75 self.assertFalse(
76 self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
77 )
78
79 async def test_skip_saving_avatar_when_not_changed(self) -> None:
80 """Tests whether saving of avatar correctly skips if the avatar hasn't
81 changed"""
82 handler = self.hs.get_sso_handler()
83
84 # Create a new user to set avatar for
85 reg_handler = self.hs.get_registration_handler()
86 user_id = self.get_success(reg_handler.register_user(approved=True))
87
88 # set avatar for the first time, should be a success
89 self.assertTrue(
90 self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
91 )
92
93 # get avatar picture for comparison after another attempt
94 profile_handler = self.hs.get_profile_handler()
95 profile = self.get_success(profile_handler.get_profile(user_id))
96 url_to_match = profile["avatar_url"]
97
98 # set same avatar for the second time, should be a success
99 self.assertTrue(
100 self.get_success(handler.set_avatar(user_id, "http://my.server/me.png"))
101 )
102
103 # compare avatar picture's url from previous step
104 profile = self.get_success(profile_handler.get_profile(user_id))
105 self.assertEqual(profile["avatar_url"], url_to_match)
106
107
108 async def mock_get_file(
109 url: str,
110 output_stream: BinaryIO,
111 max_size: Optional[int] = None,
112 headers: Optional[RawHeaders] = None,
113 is_allowed_content_type: Optional[Callable[[str], bool]] = None,
114 ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]:
115
116 fake_response = FakeResponse(code=404)
117 if url == "http://my.server/me.png":
118 fake_response = FakeResponse(
119 code=200,
120 headers=Headers(
121 {"Content-Type": ["image/png"], "Content-Length": [str(len(SMALL_PNG))]}
122 ),
123 body=SMALL_PNG,
124 )
125
126 if max_size is not None and max_size < len(SMALL_PNG):
127 raise SynapseError(
128 HTTPStatus.BAD_GATEWAY,
129 "Requested file is too large > %r bytes" % (max_size,),
130 Codes.TOO_LARGE,
131 )
132
133 if is_allowed_content_type and not is_allowed_content_type("image/png"):
134 raise SynapseError(
135 HTTPStatus.BAD_GATEWAY,
136 (
137 "Requested file's content type not allowed for this operation: %s"
138 % "image/png"
139 ),
140 )
141
142 output_stream.write(fake_response.body)
143
144 return len(SMALL_PNG), {b"Content-Type": [b"image/png"]}, "", 200
1212 # limitations under the License.
1313 import os.path
1414 import subprocess
15 from typing import List
1516
1617 from zope.interface import implementer
1718
6970 """
7071
7172
72 def create_test_cert_file(sanlist):
73 def create_test_cert_file(sanlist: List[bytes]) -> str:
7374 """build an x509 certificate file
7475
7576 Args:
76 sanlist: list[bytes]: a list of subjectAltName values for the cert
77 sanlist: a list of subjectAltName values for the cert
7778
7879 Returns:
79 str: the path to the file
80 The path to the file
8081 """
8182 global cert_file_count
8283 csr_filename = "server.csr"
777777 worker process. The test users will still sync with the main process. The purpose of testing
778778 with a worker is to check whether a Synapse module running on a worker can inform other workers/
779779 the main process that they should include additional presence when a user next syncs.
780 If this argument is True, `test_case` MUST be an instance of BaseMultiWorkerStreamTestCase.
780781 """
781782 if test_with_workers:
783 assert isinstance(test_case, BaseMultiWorkerStreamTestCase)
784
782785 # Create a worker process to make module_api calls against
783786 worker_hs = test_case.make_worker_hs(
784787 "synapse.app.generic_worker", {"worker_name": "presence_writer"}
6060 sender_power_level,
6161 power_levels.get("notifications", {}),
6262 {} if related_events is None else related_events,
63 True,
64 event.room_version.msc3931_push_features,
6365 True,
6466 )
6567
541541 self.send("OK")
542542 elif command == b"GET":
543543 self.send(None)
544
545 # Connection keep-alives.
546 elif command == b"PING":
547 self.send("PONG")
548
544549 else:
545 raise Exception("Unknown command")
550 raise Exception(f"Unknown command: {command}")
546551
547552 def send(self, msg):
548553 """Send a message back to the client."""
142142 self.persist(type="m.room.create", key="", creator=USER_ID)
143143 self.check("get_invited_rooms_for_local_user", [USER_ID_2], [])
144144 event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite")
145 assert event.internal_metadata.stream_ordering is not None
145146
146147 self.replicate()
147148
229230 j2 = self.persist(
230231 type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
231232 )
233 assert j2.internal_metadata.stream_ordering is not None
232234 self.replicate()
233235
234236 expected_pos = PersistedEventPosition(
286288 )
287289 )
288290 self.replicate()
291 assert j2.internal_metadata.stream_ordering is not None
289292
290293 event_source = RoomEventSource(self.hs)
291294 event_source.store = self.slaved_store
335338
336339 event_id = 0
337340
338 def persist(self, backfill=False, **kwargs):
341 def persist(self, backfill=False, **kwargs) -> FrozenEvent:
339342 """
340343 Returns:
341 synapse.events.FrozenEvent: The event that was persisted.
344 The event that was persisted.
342345 """
343346 event, context = self.build_event(**kwargs)
344347
1414 import os
1515 from typing import Optional, Tuple
1616
17 from twisted.internet.interfaces import IOpenSSLServerConnectionCreator
1718 from twisted.internet.protocol import Factory
18 from twisted.protocols.tls import TLSMemoryBIOFactory
19 from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol
1920 from twisted.web.http import HTTPChannel
2021 from twisted.web.server import Request
2122
101102 )
102103
103104 # fish the test server back out of the server-side TLS protocol.
104 http_server = server_tls_protocol.wrappedProtocol
105 http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # type: ignore[assignment]
105106
106107 # give the reactor a pump to get the TLS juices flowing.
107108 self.reactor.pump((0.1,))
237238 return test_server_connection_factory
238239
239240
240 def _build_test_server(connection_creator):
241 def _build_test_server(
242 connection_creator: IOpenSSLServerConnectionCreator,
243 ) -> TLSMemoryBIOProtocol:
241244 """Construct a test server
242245
243246 This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol
244247
245248 Args:
246 connection_creator (IOpenSSLServerConnectionCreator): thing to build
247 SSL connections
248 sanlist (list[bytes]): list of the SAN entries for the cert returned
249 by the server
249 connection_creator: thing to build SSL connections
250250
251251 Returns:
252252 TLSMemoryBIOProtocol
1818
1919 import synapse.rest.admin
2020 from synapse.api.errors import Codes
21 from synapse.handlers.device import DeviceHandler
2122 from synapse.rest.client import login
2223 from synapse.server import HomeServer
2324 from synapse.util import Clock
3334 ]
3435
3536 def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
36 self.handler = hs.get_device_handler()
37 handler = hs.get_device_handler()
38 assert isinstance(handler, DeviceHandler)
39 self.handler = handler
3740
3841 self.admin_user = self.register_user("admin", "pass", admin=True)
3942 self.admin_user_tok = self.login("admin", "pass")
18551855 self.assertEqual(token, channel.json_body["start"])
18561856 self.assertIn("chunk", channel.json_body)
18571857 self.assertIn("end", channel.json_body)
1858
1859 def test_room_messages_backward(self) -> None:
1860 """Test room messages can be retrieved by an admin that isn't in the room."""
1861 latest_event_id = self.helper.send(
1862 self.room_id, body="message 1", tok=self.user_tok
1863 )["event_id"]
1864
1865 # Check that we get the first and second message when querying /messages.
1866 channel = self.make_request(
1867 "GET",
1868 "/_synapse/admin/v1/rooms/%s/messages?dir=b" % (self.room_id,),
1869 access_token=self.admin_user_tok,
1870 )
1871 self.assertEqual(channel.code, 200, channel.json_body)
1872
1873 chunk = channel.json_body["chunk"]
1874 self.assertEqual(len(chunk), 6, [event["content"] for event in chunk])
1875
1876 # in backwards, this is the first event
1877 self.assertEqual(chunk[0]["event_id"], latest_event_id)
1878
1879 def test_room_messages_forward(self) -> None:
1880 """Test room messages can be retrieved by an admin that isn't in the room."""
1881 latest_event_id = self.helper.send(
1882 self.room_id, body="message 1", tok=self.user_tok
1883 )["event_id"]
1884
1885 # Check that we get the first and second message when querying /messages.
1886 channel = self.make_request(
1887 "GET",
1888 "/_synapse/admin/v1/rooms/%s/messages?dir=f" % (self.room_id,),
1889 access_token=self.admin_user_tok,
1890 )
1891 self.assertEqual(channel.code, 200, channel.json_body)
1892
1893 chunk = channel.json_body["chunk"]
1894 self.assertEqual(len(chunk), 6, [event["content"] for event in chunk])
1895
1896 # in forward, this is the last event
1897 self.assertEqual(chunk[5]["event_id"], latest_event_id)
18581898
18591899 def test_room_messages_purge(self) -> None:
18601900 """Test room messages can be retrieved by an admin that isn't in the room."""
11071107
11081108 # The "user" sent the root event and is making queries for the bundled
11091109 # aggregations: they have participated.
1110 self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 9)
1110 self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 7)
11111111 # The "user2" sent replies in the thread and is making queries for the
11121112 # bundled aggregations: they have participated.
11131113 #
11691169 bundled_aggregations["latest_event"].get("unsigned"),
11701170 )
11711171
1172 self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9)
1172 self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 7)
11731173
11741174 def test_nested_thread(self) -> None:
11751175 """
35453545 login.register_servlets,
35463546 ]
35473547
3548 def default_config(self) -> JsonDict:
3549 config = super().default_config()
3550 config["experimental_features"] = {"msc3030_enabled": True}
3551 return config
3552
35533548 def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
35543549 self._storage_controllers = self.hs.get_storage_controllers()
35553550
35913586
35923587 channel = self.make_request(
35933588 "GET",
3594 f"/_matrix/client/unstable/org.matrix.msc3030/rooms/{room_id}/timestamp_to_event?dir=b&ts={outlier_event.origin_server_ts}",
3589 f"/_matrix/client/v1/rooms/{room_id}/timestamp_to_event?dir=b&ts={outlier_event.origin_server_ts}",
35953590 access_token=self.room_owner_tok,
35963591 )
35973592 self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)
1010 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1111 # See the License for the specific language governing permissions and
1212 # limitations under the License.
13 from typing import Tuple
1314 from unittest.mock import Mock
1415
1516 from twisted.test.proto_helpers import MemoryReactor
349350
350351 self.assertTrue(notice_in_room, "No server notice in room")
351352
352 def _trigger_notice_and_join(self):
353 def _trigger_notice_and_join(self) -> Tuple[str, str, str]:
353354 """Creates enough active users to hit the MAU limit and trigger a system notice
354355 about it, then joins the system notices room with one of the users created.
355356
356357 Returns:
357 user_id (str): The ID of the user that joined the room.
358 tok (str): The access token of the user that joined the room.
359 room_id (str): The ID of the room that's been joined.
358 A tuple of:
359 user_id: The ID of the user that joined the room.
360 tok: The access token of the user that joined the room.
361 room_id: The ID of the room that's been joined.
360362 """
361363 user_id = None
362364 tok = None
2727 """
2828
2929 for device_id in device_ids:
30 stream_id = self.get_success(
30 self.get_success(
3131 self.store.add_device_change_to_streams(
3232 user_id, [device_id], ["!some:room"]
3333 )
3838 user_id=user_id,
3939 device_id=device_id,
4040 room_id="!some:room",
41 stream_id=stream_id,
4241 hosts=[host],
4342 context={},
4443 )
1010 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1111 # See the License for the specific language governing permissions and
1212 # limitations under the License.
13 from prometheus_client import generate_latest
1314
14 from synapse.metrics import REGISTRY, generate_latest
15 from synapse.metrics import REGISTRY
1516 from synapse.types import UserID, create_requester
1617
1718 from tests.unittest import HomeserverTestCase
5253
5354 items = list(
5455 filter(
55 lambda x: b"synapse_forward_extremities_" in x,
56 generate_latest(REGISTRY, emit_help=False).split(b"\n"),
56 lambda x: b"synapse_forward_extremities_" in x and b"# HELP" not in x,
57 generate_latest(REGISTRY).split(b"\n"),
5758 )
5859 )
5960
1515 from twisted.test.proto_helpers import MemoryReactor
1616
1717 from synapse.server import HomeServer
18 from synapse.storage.database import DatabasePool, LoggingTransaction
18 from synapse.storage.database import (
19 DatabasePool,
20 LoggingDatabaseConnection,
21 LoggingTransaction,
22 )
1923 from synapse.storage.engines import IncorrectDatabaseSetup
20 from synapse.storage.util.id_generators import MultiWriterIdGenerator
24 from synapse.storage.types import Cursor
25 from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
2126 from synapse.util import Clock
2227
2328 from tests.unittest import HomeserverTestCase
2429 from tests.utils import USE_POSTGRES_FOR_TESTS
30
31
32 class StreamIdGeneratorTestCase(HomeserverTestCase):
33 def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
34 self.store = hs.get_datastores().main
35 self.db_pool: DatabasePool = self.store.db_pool
36
37 self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db))
38
39 def _setup_db(self, txn: LoggingTransaction) -> None:
40 txn.execute(
41 """
42 CREATE TABLE foobar (
43 stream_id BIGINT NOT NULL,
44 data TEXT
45 );
46 """
47 )
48 txn.execute("INSERT INTO foobar VALUES (123, 'hello world');")
49
50 def _create_id_generator(self) -> StreamIdGenerator:
51 def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator:
52 return StreamIdGenerator(
53 db_conn=conn,
54 table="foobar",
55 column="stream_id",
56 )
57
58 return self.get_success_or_raise(self.db_pool.runWithConnection(_create))
59
60 def test_initial_value(self) -> None:
61 """Check that we read the current token from the DB."""
62 id_gen = self._create_id_generator()
63 self.assertEqual(id_gen.get_current_token(), 123)
64
65 def test_single_gen_next(self) -> None:
66 """Check that we correctly increment the current token from the DB."""
67 id_gen = self._create_id_generator()
68
69 async def test_gen_next() -> None:
70 async with id_gen.get_next() as next_id:
71 # We haven't persisted `next_id` yet; current token is still 123
72 self.assertEqual(id_gen.get_current_token(), 123)
73 # But we did learn what the next value is
74 self.assertEqual(next_id, 124)
75
76 # Once the context manager closes we assume that the `next_id` has been
77 # written to the DB.
78 self.assertEqual(id_gen.get_current_token(), 124)
79
80 self.get_success(test_gen_next())
81
82 def test_multiple_gen_nexts(self) -> None:
83 """Check that we handle overlapping calls to gen_next sensibly."""
84 id_gen = self._create_id_generator()
85
86 async def test_gen_next() -> None:
87 ctx1 = id_gen.get_next()
88 ctx2 = id_gen.get_next()
89 ctx3 = id_gen.get_next()
90
91 # Request three new stream IDs.
92 self.assertEqual(await ctx1.__aenter__(), 124)
93 self.assertEqual(await ctx2.__aenter__(), 125)
94 self.assertEqual(await ctx3.__aenter__(), 126)
95
96 # None are persisted: current token unchanged.
97 self.assertEqual(id_gen.get_current_token(), 123)
98
99 # Persist each in turn.
100 await ctx1.__aexit__(None, None, None)
101 self.assertEqual(id_gen.get_current_token(), 124)
102 await ctx2.__aexit__(None, None, None)
103 self.assertEqual(id_gen.get_current_token(), 125)
104 await ctx3.__aexit__(None, None, None)
105 self.assertEqual(id_gen.get_current_token(), 126)
106
107 self.get_success(test_gen_next())
108
109 def test_multiple_gen_nexts_closed_in_different_order(self) -> None:
110 """Check that we handle overlapping calls to gen_next, even when their IDs
111 are created and persisted in different orders."""
112 id_gen = self._create_id_generator()
113
114 async def test_gen_next() -> None:
115 ctx1 = id_gen.get_next()
116 ctx2 = id_gen.get_next()
117 ctx3 = id_gen.get_next()
118
119 # Request three new stream IDs.
120 self.assertEqual(await ctx1.__aenter__(), 124)
121 self.assertEqual(await ctx2.__aenter__(), 125)
122 self.assertEqual(await ctx3.__aenter__(), 126)
123
124 # None are persisted: current token unchanged.
125 self.assertEqual(id_gen.get_current_token(), 123)
126
127 # Persist them in a different order, starting with 126 from ctx3.
128 await ctx3.__aexit__(None, None, None)
129 # We haven't persisted 124 from ctx1 yet---current token is still 123.
130 self.assertEqual(id_gen.get_current_token(), 123)
131
132 # Now persist 124 from ctx1.
133 await ctx1.__aexit__(None, None, None)
134 # Current token is then 124, waiting for 125 to be persisted.
135 self.assertEqual(id_gen.get_current_token(), 124)
136
137 # Finally persist 125 from ctx2.
138 await ctx2.__aexit__(None, None, None)
139 # Current token is then 126 (skipping over 125).
140 self.assertEqual(id_gen.get_current_token(), 126)
141
142 self.get_success(test_gen_next())
143
144 def test_gen_next_while_still_waiting_for_persistence(self) -> None:
145 """Check that we handle overlapping calls to gen_next."""
146 id_gen = self._create_id_generator()
147
148 async def test_gen_next() -> None:
149 ctx1 = id_gen.get_next()
150 ctx2 = id_gen.get_next()
151 ctx3 = id_gen.get_next()
152
153 # Request two new stream IDs.
154 self.assertEqual(await ctx1.__aenter__(), 124)
155 self.assertEqual(await ctx2.__aenter__(), 125)
156
157 # Persist ctx2 first.
158 await ctx2.__aexit__(None, None, None)
159 # Still waiting on ctx1's ID to be persisted.
160 self.assertEqual(id_gen.get_current_token(), 123)
161
162 # Now request a third stream ID. It should be 126 (the smallest ID that
163 # we've not yet handed out.)
164 self.assertEqual(await ctx3.__aenter__(), 126)
165
166 self.get_success(test_gen_next())
25167
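The new `StreamIdGeneratorTestCase` pins down the contract these assertions rely on: `get_next()` hands out the next ID immediately, but `get_current_token()` only advances once every earlier ID's context manager has exited (i.e. has notionally been persisted). A hedged sketch of how storage code would typically consume such a generator, with `db_pool`, `id_gen` and the `foobar` table from the test standing in as placeholders:

from synapse.storage.database import DatabasePool
from synapse.storage.util.id_generators import StreamIdGenerator


async def insert_foobar_row(
    db_pool: DatabasePool, id_gen: StreamIdGenerator, data: str
) -> int:
    # Reserve the next stream ID; get_current_token() does not advance until
    # this context manager exits (i.e. until the row is assumed persisted).
    async with id_gen.get_next() as stream_id:
        await db_pool.simple_insert(
            table="foobar",
            values={"stream_id": stream_id, "data": data},
            desc="insert_foobar_row",
        )
    # Only once all smaller reserved IDs have also been persisted will
    # get_current_token() report this stream_id to readers.
    return stream_id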
26168
27169 class MultiWriterIdGeneratorTestCase(HomeserverTestCase):
47189 )
48190
49191 def _create_id_generator(
50 self, instance_name="master", writers: Optional[List[str]] = None
192 self, instance_name: str = "master", writers: Optional[List[str]] = None
51193 ) -> MultiWriterIdGenerator:
52 def _create(conn):
194 def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
53195 return MultiWriterIdGenerator(
54196 conn,
55197 self.db_pool,
445587 self._insert_row_with_id("master", 3)
446588
447589 # Now we add a row *without* updating the stream ID
448 def _insert(txn):
590 def _insert(txn: Cursor) -> None:
449591 txn.execute("INSERT INTO foobar VALUES (26, 'master')")
450592
451593 self.get_success(self.db_pool.runInteraction("_insert", _insert))
480622 )
481623
482624 def _create_id_generator(
483 self, instance_name="master", writers: Optional[List[str]] = None
625 self, instance_name: str = "master", writers: Optional[List[str]] = None
484626 ) -> MultiWriterIdGenerator:
485 def _create(conn):
627 def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
486628 return MultiWriterIdGenerator(
487629 conn,
488630 self.db_pool,
616758 )
617759
618760 def _create_id_generator(
619 self, instance_name="master", writers: Optional[List[str]] = None
761 self, instance_name: str = "master", writers: Optional[List[str]] = None
620762 ) -> MultiWriterIdGenerator:
621 def _create(conn):
763 def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator:
622764 return MultiWriterIdGenerator(
623765 conn,
624766 self.db_pool,
640782 instance_name: str,
641783 number: int,
642784 update_stream_table: bool = True,
643 ):
785 ) -> None:
644786 """Insert N rows as the given instance, inserting with stream IDs pulled
645787 from the postgres sequence.
646788 """
6060
6161 filtered = self.get_success(
6262 filter_events_for_server(
63 self._storage_controllers, "test_server", events_to_filter
63 self._storage_controllers, "test_server", "hs", events_to_filter
6464 )
6565 )
6666
8282 self.assertEqual(
8383 self.get_success(
8484 filter_events_for_server(
85 self._storage_controllers, "remote_hs", [outlier]
85 self._storage_controllers, "remote_hs", "hs", [outlier]
8686 )
8787 ),
8888 [outlier],
9393
9494 filtered = self.get_success(
9595 filter_events_for_server(
96 self._storage_controllers, "remote_hs", [outlier, evt]
96 self._storage_controllers, "remote_hs", "local_hs", [outlier, evt]
9797 )
9898 )
9999 self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}")
105105 # be redacted)
106106 filtered = self.get_success(
107107 filter_events_for_server(
108 self._storage_controllers, "other_server", [outlier, evt]
108 self._storage_controllers, "other_server", "local_hs", [outlier, evt]
109109 )
110110 )
111111 self.assertEqual(filtered[0], outlier)
140140 # ... and the filtering happens.
141141 filtered = self.get_success(
142142 filter_events_for_server(
143 self._storage_controllers, "test_server", events_to_filter
143 self._storage_controllers, "test_server", "local_hs", events_to_filter
144144 )
145145 )
146146
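`filter_events_for_server` now takes both the name of the server the events are destined for and the local server's own name, hence the extra argument at every call site above. A rough sketch of the updated call shape; the helper, server names and surrounding types are illustrative assumptions:

from typing import List

from synapse.events import EventBase
from synapse.storage.controllers import StorageControllers
from synapse.visibility import filter_events_for_server


async def events_for_destination(
    storage_controllers: StorageControllers,
    events: List[EventBase],
) -> List[EventBase]:
    # Hypothetical helper: second argument is the destination server,
    # third is this (the local) homeserver's name.
    return await filter_events_for_server(
        storage_controllers,
        "remote.example.org",
        "local.example.org",
        events,
    )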
359359 store.db_pool.updates.do_next_background_update(False), by=0.1
360360 )
361361
362 def make_homeserver(self, reactor, clock):
362 def make_homeserver(self, reactor: MemoryReactor, clock: Clock):
363363 """
364364 Make and return a homeserver.
365365
366366 Args:
367367 reactor: A Twisted Reactor, or something that pretends to be one.
368 clock (synapse.util.Clock): The Clock, associated with the reactor.
368 clock: The Clock, associated with the reactor.
369369
370370 Returns:
371371 A homeserver suitable for testing.
425425
426426 Args:
427427 reactor: A Twisted Reactor, or something that pretends to be one.
428 clock (synapse.util.Clock): The Clock, associated with the reactor.
429 homeserver (synapse.server.HomeServer): The HomeServer to test
430 against.
428 clock: The Clock, associated with the reactor.
429 homeserver: The HomeServer to test against.
431430
432431 Function to optionally be overridden in subclasses.
433432 """
451450 given content.
452451
453452 Args:
454 method (bytes/unicode): The HTTP request method ("verb").
455 path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
456 escaped UTF-8 & spaces and such).
457 content (bytes or dict): The body of the request. JSON-encoded, if
458 a dict.
453 method: The HTTP request method ("verb").
454 path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces
455 and such).
456 content: The body of the request. JSON-encoded, if a dict.
459457 shorthand: Whether to try and be helpful and prefix the given URL
460458 with the usual REST API path, if it doesn't contain it.
461459 federation_auth_origin: if set to not-None, we will add a fake
1010 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1111 # See the License for the specific language governing permissions and
1212 # limitations under the License.
13 from typing import NoReturn
1314 from unittest.mock import Mock
1415
1516 from twisted.internet import defer
2223
2324
2425 class CachedCallTestCase(TestCase):
25 def test_get(self):
26 def test_get(self) -> None:
2627 """
2728 Happy-path test case: makes a couple of calls and makes sure they behave
2829 correctly
2930 """
30 d = Deferred()
31 d: "Deferred[int]" = Deferred()
3132
32 async def f():
33 async def f() -> int:
3334 return await d
3435
3536 slow_call = Mock(side_effect=f)
4243 # now fire off a couple of calls
4344 completed_results = []
4445
45 async def r():
46 async def r() -> None:
4647 res = await cached_call.get()
4748 completed_results.append(res)
4849
6869 self.assertEqual(r3, 123)
6970 slow_call.assert_not_called()
7071
71 def test_fast_call(self):
72 def test_fast_call(self) -> None:
7273 """
7374 Test the behaviour when the underlying function completes immediately
7475 """
7576
76 async def f():
77 async def f() -> int:
7778 return 12
7879
7980 fast_call = Mock(side_effect=f)
9192
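For context, `CachedCall` wraps a zero-argument coroutine function so that repeated and concurrent `get()` calls share a single invocation; that is what the mocks in these tests assert. A small hedged sketch of that behaviour outside the test harness, with a made-up lookup function:

from synapse.util.caches.cached_call import CachedCall


async def fetch_config() -> dict:
    # Imagine an expensive lookup here; it should only ever run once.
    return {"answer": 42}


cached = CachedCall(fetch_config)


async def main() -> None:
    first = await cached.get()
    second = await cached.get()  # served from the cached result, no second call
    assert first == second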
9293
9394 class RetryOnExceptionCachedCallTestCase(TestCase):
94 def test_get(self):
95 def test_get(self) -> None:
9596 # set up the RetryOnExceptionCachedCall around a function which will fail
9697 # (after a while)
97 d = Deferred()
98 d: "Deferred[int]" = Deferred()
9899
99 async def f1():
100 async def f1() -> NoReturn:
100101 await d
101102 raise ValueError("moo")
102103
109110 # now fire off a couple of calls
110111 completed_results = []
111112
112 async def r():
113 async def r() -> None:
113114 try:
114115 await cached_call.get()
115116 except Exception as e1:
136137 # to the getter
137138 d = Deferred()
138139
139 async def f2():
140 async def f2() -> int:
140141 return await d
141142
142143 slow_call.reset_mock()
1212 # limitations under the License.
1313
1414 from functools import partial
15 from typing import List, Tuple
1516
1617 from twisted.internet import defer
1718
2122
2223
2324 class DeferredCacheTestCase(TestCase):
24 def test_empty(self):
25 cache = DeferredCache("test")
25 def test_empty(self) -> None:
26 cache: DeferredCache[str, int] = DeferredCache("test")
2627 with self.assertRaises(KeyError):
2728 cache.get("foo")
2829
29 def test_hit(self):
30 cache = DeferredCache("test")
30 def test_hit(self) -> None:
31 cache: DeferredCache[str, int] = DeferredCache("test")
3132 cache.prefill("foo", 123)
3233
3334 self.assertEqual(self.successResultOf(cache.get("foo")), 123)
3435
35 def test_hit_deferred(self):
36 cache = DeferredCache("test")
37 origin_d = defer.Deferred()
36 def test_hit_deferred(self) -> None:
37 cache: DeferredCache[str, int] = DeferredCache("test")
38 origin_d: "defer.Deferred[int]" = defer.Deferred()
3839 set_d = cache.set("k1", origin_d)
3940
4041 # get should return an incomplete deferred
4243 self.assertFalse(get_d.called)
4344
4445 # add a callback that will make sure that the set_d gets called before the get_d
45 def check1(r):
46 def check1(r: str) -> str:
4647 self.assertTrue(set_d.called)
4748 return r
4849
5455 self.assertEqual(self.successResultOf(set_d), 99)
5556 self.assertEqual(self.successResultOf(get_d), 99)
5657
57 def test_callbacks(self):
58 def test_callbacks(self) -> None:
5859 """Invalidation callbacks are called at the right time"""
59 cache = DeferredCache("test")
60 cache: DeferredCache[str, int] = DeferredCache("test")
6061 callbacks = set()
6162
6263 # start with an entry, with a callback
6364 cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill"))
6465
6566 # now replace that entry with a pending result
66 origin_d = defer.Deferred()
67 origin_d: "defer.Deferred[int]" = defer.Deferred()
6768 set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set"))
6869
6970 # ... and also make a get request
8889 cache.prefill("k1", 30)
8990 self.assertEqual(callbacks, {"set", "get"})
9091
91 def test_set_fail(self):
92 cache = DeferredCache("test")
92 def test_set_fail(self) -> None:
93 cache: DeferredCache[str, int] = DeferredCache("test")
9394 callbacks = set()
9495
9596 # start with an entry, with a callback
9697 cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill"))
9798
9899 # now replace that entry with a pending result
99 origin_d = defer.Deferred()
100 origin_d: defer.Deferred = defer.Deferred()
100101 set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set"))
101102
102103 # ... and also make a get request
125126 cache.prefill("k1", 30)
126127 self.assertEqual(callbacks, {"prefill", "get2"})
127128
128 def test_get_immediate(self):
129 cache = DeferredCache("test")
130 d1 = defer.Deferred()
129 def test_get_immediate(self) -> None:
130 cache: DeferredCache[str, int] = DeferredCache("test")
131 d1: "defer.Deferred[int]" = defer.Deferred()
131132 cache.set("key1", d1)
132133
133134 # get_immediate should return default
141142 v = cache.get_immediate("key1", 1)
142143 self.assertEqual(v, 2)
143144
144 def test_invalidate(self):
145 cache = DeferredCache("test")
145 def test_invalidate(self) -> None:
146 cache: DeferredCache[Tuple[str], int] = DeferredCache("test")
146147 cache.prefill(("foo",), 123)
147148 cache.invalidate(("foo",))
148149
149150 with self.assertRaises(KeyError):
150151 cache.get(("foo",))
151152
152 def test_invalidate_all(self):
153 cache = DeferredCache("testcache")
153 def test_invalidate_all(self) -> None:
154 cache: DeferredCache[str, str] = DeferredCache("testcache")
154155
155156 callback_record = [False, False]
156157
157 def record_callback(idx):
158 def record_callback(idx: int) -> None:
158159 callback_record[idx] = True
159160
160161 # add a couple of pending entries
161 d1 = defer.Deferred()
162 d1: "defer.Deferred[str]" = defer.Deferred()
162163 cache.set("key1", d1, partial(record_callback, 0))
163164
164 d2 = defer.Deferred()
165 d2: "defer.Deferred[str]" = defer.Deferred()
165166 cache.set("key2", d2, partial(record_callback, 1))
166167
167168 # lookup should return pending deferreds
192193 with self.assertRaises(KeyError):
193194 cache.get("key1", None)
194195
195 def test_eviction(self):
196 cache = DeferredCache(
196 def test_eviction(self) -> None:
197 cache: DeferredCache[int, str] = DeferredCache(
197198 "test", max_entries=2, apply_cache_factor_from_config=False
198199 )
199200
207208 cache.get(2)
208209 cache.get(3)
209210
210 def test_eviction_lru(self):
211 cache = DeferredCache(
211 def test_eviction_lru(self) -> None:
212 cache: DeferredCache[int, str] = DeferredCache(
212213 "test", max_entries=2, apply_cache_factor_from_config=False
213214 )
214215
226227 cache.get(1)
227228 cache.get(3)
228229
229 def test_eviction_iterable(self):
230 cache = DeferredCache(
230 def test_eviction_iterable(self) -> None:
231 cache: DeferredCache[int, List[str]] = DeferredCache(
231232 "test",
232233 max_entries=3,
233234 apply_cache_factor_from_config=False,
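Most of this file's changes simply add explicit type parameters (`DeferredCache[KT, VT]`) and return annotations so the tests type-check. A short sketch of the annotated usage pattern the tests now follow, using made-up keys and values:

from twisted.internet import defer

from synapse.util.caches.deferred_cache import DeferredCache

# Key and value types are declared up front so mypy can check prefill/set/get.
cache: DeferredCache[str, int] = DeferredCache("example")

cache.prefill("ready", 1)                        # immediately available value
pending: "defer.Deferred[int]" = defer.Deferred()
cache.set("later", pending)                      # value arrives asynchronously
pending.callback(2)

assert cache.get_immediate("ready", None) == 1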
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
1414 import logging
15 from typing import Iterable, Set, Tuple
15 from typing import Iterable, Set, Tuple, cast
1616 from unittest import mock
1717
1818 from twisted.internet import defer, reactor
1919 from twisted.internet.defer import CancelledError, Deferred
20 from twisted.internet.interfaces import IReactorTime
2021
2122 from synapse.api.errors import SynapseError
2223 from synapse.logging.context import (
3637
3738
3839 def run_on_reactor():
39 d = defer.Deferred()
40 reactor.callLater(0, d.callback, 0)
40 d: "Deferred[int]" = defer.Deferred()
41 cast(IReactorTime, reactor).callLater(0, d.callback, 0)
4142 return make_deferred_yieldable(d)
4243
4344
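The global `twisted.internet.reactor` has no useful static type, so the test casts it to `IReactorTime` before calling `callLater` to satisfy mypy. The same narrowing pattern in isolation, as a sketch:

from typing import cast

from twisted.internet import defer, reactor
from twisted.internet.defer import Deferred
from twisted.internet.interfaces import IReactorTime


def fire_next_tick() -> "Deferred[int]":
    d: "Deferred[int]" = defer.Deferred()
    # Narrow the untyped global reactor to the interface that provides callLater().
    cast(IReactorTime, reactor).callLater(0, d.callback, 0)
    return d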
223224 callbacks: Set[str] = set()
224225
225226 # set off an asynchronous request
226 obj.result = origin_d = defer.Deferred()
227 origin_d: Deferred = defer.Deferred()
228 obj.result = origin_d
227229
228230 d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1"))
229231 self.assertFalse(d1.called)
261263 """Check that logcontexts are set and restored correctly when
262264 using the cache."""
263265
264 complete_lookup = defer.Deferred()
266 complete_lookup: Deferred = defer.Deferred()
265267
266268 class Cls:
267269 @descriptors.cached()
771773
772774 @descriptors.cachedList(cached_method_name="fn", list_name="args1")
773775 async def list_fn(self, args1, arg2):
774 assert current_context().name == "c1"
776 context = current_context()
777 assert isinstance(context, LoggingContext)
778 assert context.name == "c1"
775779 # we want this to behave like an asynchronous function
776780 await run_on_reactor()
777 assert current_context().name == "c1"
781 context = current_context()
782 assert isinstance(context, LoggingContext)
783 assert context.name == "c1"
778784 return self.mock(args1, arg2)
779785
780786 with LoggingContext("c1") as c1:
833839 return self.mock(args1)
834840
835841 obj = Cls()
836 deferred_result = Deferred()
842 deferred_result: "Deferred[dict]" = Deferred()
837843 obj.mock.return_value = deferred_result
838844
839845 # start off several concurrent lookups of the same key
3434 (These have a cache with a short timeout_ms, shorter than will be tested by advancing the clock)
3535 """
3636
37 def setUp(self):
37 def setUp(self) -> None:
3838 self.reactor, self.clock = get_clock()
3939
4040 def with_cache(self, name: str, ms: int = 0) -> ResponseCache:
4848 await self.clock.sleep(1)
4949 return o
5050
51 def test_cache_hit(self):
51 def test_cache_hit(self) -> None:
5252 cache = self.with_cache("keeping_cache", ms=9001)
5353
5454 expected_result = "howdy"
7373 "cache should still have the result",
7474 )
7575
76 def test_cache_miss(self):
76 def test_cache_miss(self) -> None:
7777 cache = self.with_cache("trashing_cache", ms=0)
7878
7979 expected_result = "howdy"
8989 )
9090 self.assertCountEqual([], cache.keys(), "cache should not have the result now")
9191
92 def test_cache_expire(self):
92 def test_cache_expire(self) -> None:
9393 cache = self.with_cache("short_cache", ms=1000)
9494
9595 expected_result = "howdy"
114114 self.reactor.pump((2,))
115115 self.assertCountEqual([], cache.keys(), "cache should not have the result now")
116116
117 def test_cache_wait_hit(self):
117 def test_cache_wait_hit(self) -> None:
118118 cache = self.with_cache("neutral_cache")
119119
120120 expected_result = "howdy"
130130
131131 self.assertEqual(expected_result, self.successResultOf(wrap_d))
132132
133 def test_cache_wait_expire(self):
133 def test_cache_wait_expire(self) -> None:
134134 cache = self.with_cache("medium_cache", ms=3000)
135135
136136 expected_result = "howdy"
161161 self.assertCountEqual([], cache.keys(), "cache should not have the result now")
162162
163163 @parameterized.expand([(True,), (False,)])
164 def test_cache_context_nocache(self, should_cache: bool):
164 def test_cache_context_nocache(self, should_cache: bool) -> None:
165165 """If the callback clears the should_cache bit, the result should not be cached"""
166166 cache = self.with_cache("medium_cache", ms=3000)
167167
169169
170170 call_count = 0
171171
172 async def non_caching(o: str, cache_context: ResponseCacheContext[int]):
172 async def non_caching(o: str, cache_context: ResponseCacheContext[int]) -> str:
173173 nonlocal call_count
174174 call_count += 1
175175 await self.clock.sleep(1)
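`ResponseCache` deduplicates concurrent requests for the same key and keeps the result around for `timeout_ms` afterwards, which is what the `ms=` values in these tests control. A hedged sketch of the usual `wrap()` pattern; the cache name, timeout and handler are illustrative assumptions, and the clock would normally come from `hs.get_clock()`:

from synapse.util import Clock
from synapse.util.caches.response_cache import ResponseCache


def make_cache(clock: Clock) -> "ResponseCache[str]":
    return ResponseCache(clock, "example_cache", timeout_ms=3000)


async def handle(cache: "ResponseCache[str]", key: str) -> str:
    async def compute() -> str:
        return f"result for {key}"

    # Concurrent callers with the same key share a single compute() call;
    # the result then lingers in the cache for timeout_ms afterwards.
    return await cache.wrap(key, compute)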
1919
2020
2121 class CacheTestCase(unittest.TestCase):
22 def setUp(self):
22 def setUp(self) -> None:
2323 self.mock_timer = Mock(side_effect=lambda: 100.0)
24 self.cache = TTLCache("test_cache", self.mock_timer)
24 self.cache: TTLCache[str, str] = TTLCache("test_cache", self.mock_timer)
2525
26 def test_get(self):
26 def test_get(self) -> None:
2727 """simple set/get tests"""
2828 self.cache.set("one", "1", 10)
2929 self.cache.set("two", "2", 20)
5858 self.assertEqual(self.cache._metrics.hits, 4)
5959 self.assertEqual(self.cache._metrics.misses, 5)
6060
61 def test_expiry(self):
61 def test_expiry(self) -> None:
6262 self.cache.set("one", "1", 10)
6363 self.cache.set("two", "2", 20)
6464 self.cache.set("three", "3", 30)
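`TTLCache` takes a timer callable at construction (here a `Mock` pinned to 100.0) and an explicit TTL per entry, which is what makes expiry deterministic in these tests. A small sketch with a real monotonic timer and illustrative keys:

import time

from synapse.util.caches.ttlcache import TTLCache

cache: TTLCache[str, str] = TTLCache("example_cache", timer=time.monotonic)

cache.set("one", "1", ttl=10)   # expires 10 seconds from now
print(cache.get("one"))          # "1" while still within the TTL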