Update upstream source from tag 'upstream/1.73.0'
Update to upstream version '1.73.0'
with Debian dir 8c122f4c1f40f1d1c4989902b42fd210edc223d9
Andrej Shadura
1 year, 4 months ago
3 | 3 | root = true |
4 | 4 | |
5 | 5 | # 4 space indentation |
6 | [*.py] | |
6 | [*.{py,pyi}] | |
7 | 7 | indent_style = space |
8 | 8 | indent_size = 4 |
9 | 9 | max_line_length = 88 |
73 | 73 | - Debian packages from packages.matrix.org |
74 | 74 | - pip (from PyPI) |
75 | 75 | - Other (please mention below) |
76 | - I don't know | |
77 | validations: | |
78 | required: true | |
79 | - type: input | |
80 | id: database | |
81 | attributes: | |
82 | label: Database | |
83 | description: | | |
84 | Are you using SQLite or PostgreSQL? What's the version of your database? | |
85 | ||
86 | If PostgreSQL, please also answer the following: | |
87 | - are you using a single PostgreSQL server | |
88 | or [separate servers for `main` and `state`](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#databases)? | |
89 | - have you previously ported from SQLite using the Synapse "portdb" script? | |
90 | - have you previously restored from a backup? | |
91 | validations: | |
92 | required: true | |
93 | - type: dropdown | |
94 | id: workers | |
95 | attributes: | |
96 | label: Workers | |
97 | description: | | |
98 | Are you running a single Synapse process, or are you running | |
99 | [2 or more workers](https://matrix-org.github.io/synapse/latest/workers.html)? | |
100 | options: | |
101 | - Single process | |
102 | - Multiple workers | |
103 | - I don't know | |
104 | validations: | |
105 | required: true | |
76 | 106 | - type: textarea |
77 | 107 | id: platform |
78 | 108 | attributes: |
83 | 113 | validations: |
84 | 114 | required: true |
85 | 115 | - type: textarea |
116 | id: config | |
117 | attributes: | |
118 | label: Configuration | |
119 | description: | | |
120 | Do you have any unusual config options turned on? If so, please provide details. | |
121 | ||
122 | - Experimental or undocumented features | |
123 | - [Presence](https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#presence) | |
124 | - [Message retention](https://matrix-org.github.io/synapse/latest/message_retention_policies.html) | |
125 | - [Synapse modules](https://matrix-org.github.io/synapse/latest/modules/index.html) | |
126 | - type: textarea | |
86 | 127 | id: logs |
87 | 128 | attributes: |
88 | 129 | label: Relevant log output |
89 | 130 | description: | |
90 | 131 | Please copy and paste any relevant log output, ideally at INFO or DEBUG log level. |
91 | This will be automatically formatted into code, so there is no need for backticks. | |
132 | This will be automatically formatted into code, so there is no need for backticks (`\``). | |
92 | 133 | |
93 | 134 | Please be careful to remove any personal or private data. |
94 | 135 | |
95 | **Bug reports are usually very difficult to diagnose without logging.** | |
136 | **Bug reports are usually impossible to diagnose without logging.** | |
96 | 137 | render: shell |
97 | 138 | validations: |
98 | 139 | required: true |
26 | 26 | steps: |
27 | 27 | - uses: actions/checkout@v3 |
28 | 28 | - name: Install Rust |
29 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
29 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
30 | 30 | with: |
31 | 31 | toolchain: stable |
32 | 32 | - uses: Swatinem/rust-cache@v2 |
60 | 60 | - uses: actions/checkout@v3 |
61 | 61 | |
62 | 62 | - name: Install Rust |
63 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
63 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
64 | 64 | with: |
65 | 65 | toolchain: stable |
66 | 66 | - uses: Swatinem/rust-cache@v2 |
133 | 133 | - uses: actions/checkout@v3 |
134 | 134 | |
135 | 135 | - name: Install Rust |
136 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
136 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
137 | 137 | with: |
138 | 138 | toolchain: stable |
139 | 139 | - uses: Swatinem/rust-cache@v2 |
0 | # This task does not run complement tests, see tests.yaml instead. | |
1 | # This task does not build docker images for synapse for use on docker hub, see docker.yaml instead | |
2 | ||
3 | name: Store complement-synapse image in ghcr.io | |
4 | on: | |
5 | push: | |
6 | branches: [ "master" ] | |
7 | schedule: | |
8 | - cron: '0 5 * * *' | |
9 | workflow_dispatch: | |
10 | inputs: | |
11 | branch: | |
12 | required: true | |
13 | default: 'develop' | |
14 | type: choice | |
15 | options: | |
16 | - develop | |
17 | - master | |
18 | ||
19 | # Only run this action once per pull request/branch; restart if a new commit arrives. | |
20 | # C.f. https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#concurrency | |
21 | # and https://docs.github.com/en/actions/reference/context-and-expression-syntax-for-github-actions#github-context | |
22 | concurrency: | |
23 | group: ${{ github.workflow }}-${{ github.ref }} | |
24 | cancel-in-progress: true | |
25 | ||
26 | jobs: | |
27 | build: | |
28 | name: Build and push complement image | |
29 | runs-on: ubuntu-latest | |
30 | permissions: | |
31 | contents: read | |
32 | packages: write | |
33 | steps: | |
34 | - name: Checkout specific branch (debug build) | |
35 | uses: actions/checkout@v3 | |
36 | if: github.event_name == 'workflow_dispatch' | |
37 | with: | |
38 | ref: ${{ inputs.branch }} | |
39 | - name: Checkout clean copy of develop (scheduled build) | |
40 | uses: actions/checkout@v3 | |
41 | if: github.event_name == 'schedule' | |
42 | with: | |
43 | ref: develop | |
44 | - name: Checkout clean copy of master (on-push) | |
45 | uses: actions/checkout@v3 | |
46 | if: github.event_name == 'push' | |
47 | with: | |
48 | ref: master | |
49 | - name: Login to registry | |
50 | uses: docker/login-action@v1 | |
51 | with: | |
52 | registry: ghcr.io | |
53 | username: ${{ github.actor }} | |
54 | password: ${{ secrets.GITHUB_TOKEN }} | |
55 | - name: Work out labels for complement image | |
56 | id: meta | |
57 | uses: docker/metadata-action@v4 | |
58 | with: | |
59 | images: ghcr.io/${{ github.repository }}/complement-synapse | |
60 | tags: | | |
61 | type=schedule,pattern=nightly,enable=${{ github.event_name == 'schedule'}} | |
62 | type=raw,value=develop,enable=${{ github.event_name == 'schedule' || inputs.branch == 'develop' }} | |
63 | type=raw,value=latest,enable=${{ github.event_name == 'push' || inputs.branch == 'master' }} | |
64 | type=sha,format=long | |
65 | - name: Run scripts-dev/complement.sh to generate complement-synapse:latest image. | |
66 | run: scripts-dev/complement.sh --build-only | |
67 | - name: Tag and push generated image | |
68 | run: | | |
69 | for TAG in ${{ join(fromJson(steps.meta.outputs.json).tags, ' ') }}; do | |
70 | echo "tag and push $TAG" | |
71 | docker tag complement-synapse $TAG | |
72 | docker push $TAG | |
73 | done |
26 | 26 | rust: |
27 | 27 | - 'rust/**' |
28 | 28 | - 'Cargo.toml' |
29 | - 'Cargo.lock' | |
29 | 30 | |
30 | 31 | check-sampleconfig: |
31 | 32 | runs-on: ubuntu-latest |
101 | 102 | # There don't seem to be versioned releases of this action per se: for each rust |
102 | 103 | # version there is a branch which gets constantly rebased on top of master. |
103 | 104 | # We pin to a specific commit for paranoia's sake. |
104 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
105 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
105 | 106 | with: |
106 | 107 | toolchain: 1.58.1 |
107 | 108 | components: clippy |
121 | 122 | # There don't seem to be versioned releases of this action per se: for each rust |
122 | 123 | # version there is a branch which gets constantly rebased on top of master. |
123 | 124 | # We pin to a specific commit for paranoia's sake. |
124 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
125 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
125 | 126 | with: |
126 | 127 | toolchain: 1.58.1 |
127 | 128 | components: rustfmt |
183 | 184 | # There don't seem to be versioned releases of this action per se: for each rust |
184 | 185 | # version there is a branch which gets constantly rebased on top of master. |
185 | 186 | # We pin to a specific commit for paranoia's sake. |
186 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
187 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
187 | 188 | with: |
188 | 189 | toolchain: 1.58.1 |
189 | 190 | - uses: Swatinem/rust-cache@v2 |
227 | 228 | # There don't seem to be versioned releases of this action per se: for each rust |
228 | 229 | # version there is a branch which gets constantly rebased on top of master. |
229 | 230 | # We pin to a specific commit for paranoia's sake. |
230 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
231 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
231 | 232 | with: |
232 | 233 | toolchain: 1.58.1 |
233 | 234 | - uses: Swatinem/rust-cache@v2 |
345 | 346 | # There don't seem to be versioned releases of this action per se: for each rust |
346 | 347 | # version there is a branch which gets constantly rebased on top of master. |
347 | 348 | # We pin to a specific commit for paranoia's sake. |
348 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
349 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
349 | 350 | with: |
350 | 351 | toolchain: 1.58.1 |
351 | 352 | - uses: Swatinem/rust-cache@v2 |
488 | 489 | # There don't seem to be versioned releases of this action per se: for each rust |
489 | 490 | # version there is a branch which gets constantly rebased on top of master. |
490 | 491 | # We pin to a specific commit for paranoia's sake. |
491 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
492 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
492 | 493 | with: |
493 | 494 | toolchain: 1.58.1 |
494 | 495 | - uses: Swatinem/rust-cache@v2 |
516 | 517 | # There don't seem to be versioned releases of this action per se: for each rust |
517 | 518 | # version there is a branch which gets constantly rebased on top of master. |
518 | 519 | # We pin to a specific commit for paranoia's sake. |
519 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
520 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
520 | 521 | with: |
521 | 522 | toolchain: 1.58.1 |
522 | 523 | - uses: Swatinem/rust-cache@v2 |
17 | 17 | - uses: actions/checkout@v3 |
18 | 18 | |
19 | 19 | - name: Install Rust |
20 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
20 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
21 | 21 | with: |
22 | 22 | toolchain: stable |
23 | 23 | - uses: Swatinem/rust-cache@v2 |
42 | 42 | - run: sudo apt-get -qq install xmlsec1 |
43 | 43 | |
44 | 44 | - name: Install Rust |
45 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
45 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
46 | 46 | with: |
47 | 47 | toolchain: stable |
48 | 48 | - uses: Swatinem/rust-cache@v2 |
81 | 81 | - uses: actions/checkout@v3 |
82 | 82 | |
83 | 83 | - name: Install Rust |
84 | uses: dtolnay/rust-toolchain@55c7845fad90d0ae8b2e83715cb900e5e861e8cb | |
84 | uses: dtolnay/rust-toolchain@e645b0cf01249a964ec099494d38d2da0f0b349f | |
85 | 85 | with: |
86 | 86 | toolchain: stable |
87 | 87 | - uses: Swatinem/rust-cache@v2 |
0 | Synapse 1.73.0 (2022-12-06) | |
1 | =========================== | |
2 | ||
3 | Please note that legacy Prometheus metric names have been removed in this release; see [the upgrade notes](https://github.com/matrix-org/synapse/blob/release-v1.73/docs/upgrade.md#legacy-prometheus-metric-names-have-now-been-removed) for more details. | |
4 | ||
5 | No significant changes since 1.73.0rc2. | |
6 | ||
7 | ||
8 | Synapse 1.73.0rc2 (2022-12-01) | |
9 | ============================== | |
10 | ||
11 | Bugfixes | |
12 | -------- | |
13 | ||
14 | - Fix a regression in Synapse 1.73.0rc1 where Synapse's main process would stop responding to HTTP requests when a user with a large number of devices logs in. ([\#14582](https://github.com/matrix-org/synapse/issues/14582)) | |
15 | ||
16 | ||
17 | Synapse 1.73.0rc1 (2022-11-29) | |
18 | ============================== | |
19 | ||
20 | Features | |
21 | -------- | |
22 | ||
23 | - Speed-up `/messages` with `filter_events_for_client` optimizations. ([\#14527](https://github.com/matrix-org/synapse/issues/14527)) | |
24 | - Improve DB performance by reducing amount of data that gets read in `device_lists_changes_in_room`. ([\#14534](https://github.com/matrix-org/synapse/issues/14534)) | |
25 | - Adds support for handling avatar in SSO OIDC login. Contributed by @ashfame. ([\#13917](https://github.com/matrix-org/synapse/issues/13917)) | |
26 | - Move MSC3030 `/timestamp_to_event` endpoints to stable `v1` location (`/_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction>`, `/_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction>`). ([\#14471](https://github.com/matrix-org/synapse/issues/14471)) | |
27 | - Reduce database load of [Client-Server endpoints](https://spec.matrix.org/v1.5/client-server-api/#aggregations) which return bundled aggregations. ([\#14491](https://github.com/matrix-org/synapse/issues/14491), [\#14508](https://github.com/matrix-org/synapse/issues/14508), [\#14510](https://github.com/matrix-org/synapse/issues/14510)) | |
28 | - Add unstable support for an Extensible Events room version (`org.matrix.msc1767.10`) via [MSC1767](https://github.com/matrix-org/matrix-spec-proposals/pull/1767), [MSC3931](https://github.com/matrix-org/matrix-spec-proposals/pull/3931), [MSC3932](https://github.com/matrix-org/matrix-spec-proposals/pull/3932), and [MSC3933](https://github.com/matrix-org/matrix-spec-proposals/pull/3933). ([\#14520](https://github.com/matrix-org/synapse/issues/14520), [\#14521](https://github.com/matrix-org/synapse/issues/14521), [\#14524](https://github.com/matrix-org/synapse/issues/14524)) | |
29 | - Prune user's old devices on login if they have too many. ([\#14038](https://github.com/matrix-org/synapse/issues/14038), [\#14580](https://github.com/matrix-org/synapse/issues/14580)) | |
30 | ||
31 | ||
32 | Bugfixes | |
33 | -------- | |
34 | ||
35 | - Fix a long-standing bug where paginating from the start of a room did not work. Contributed by @gnunicorn. ([\#14149](https://github.com/matrix-org/synapse/issues/14149)) | |
36 | - Fix a bug introduced in Synapse 1.58.0 where a user with presence state `org.matrix.msc3026.busy` would mistakenly be set to `online` when calling `/sync` or `/events` on a worker process. ([\#14393](https://github.com/matrix-org/synapse/issues/14393)) | |
37 | - Fix a bug introduced in Synapse 1.70.0 where a receipt's thread ID was not sent over federation. ([\#14466](https://github.com/matrix-org/synapse/issues/14466)) | |
38 | - Fix a long-standing bug where the [List media admin API](https://matrix-org.github.io/synapse/latest/admin_api/media_admin_api.html#list-all-media-in-a-room) would fail when processing an image with broken thumbnail information. ([\#14537](https://github.com/matrix-org/synapse/issues/14537)) | |
39 | - Fix a bug introduced in Synapse 1.67.0 where two logging context warnings would be logged on startup. ([\#14574](https://github.com/matrix-org/synapse/issues/14574)) | |
40 | - In application service transactions that include the experimental `org.matrix.msc3202.device_one_time_key_counts` key, include a duplicate key of `org.matrix.msc3202.device_one_time_keys_count` to match the name proposed by [MSC3202](https://github.com/matrix-org/matrix-spec-proposals/pull/3202). ([\#14565](https://github.com/matrix-org/synapse/issues/14565)) | |
41 | - Fix a bug introduced in Synapse 0.9 where Synapse would fail to fetch server keys whose IDs contain a forward slash. ([\#14490](https://github.com/matrix-org/synapse/issues/14490)) | |
42 | ||
43 | ||
44 | Improved Documentation | |
45 | ---------------------- | |
46 | ||
47 | - Fixed link to 'Synapse administration endpoints'. ([\#14499](https://github.com/matrix-org/synapse/issues/14499)) | |
48 | ||
49 | ||
50 | Deprecations and Removals | |
51 | ------------------------- | |
52 | ||
53 | - Remove legacy Prometheus metrics names. They were deprecated in Synapse v1.69.0 and disabled by default in Synapse v1.71.0. ([\#14538](https://github.com/matrix-org/synapse/issues/14538)) | |
54 | ||
55 | ||
56 | Internal Changes | |
57 | ---------------- | |
58 | ||
59 | - Improve type hinting throughout Synapse. ([\#14055](https://github.com/matrix-org/synapse/issues/14055), [\#14412](https://github.com/matrix-org/synapse/issues/14412), [\#14529](https://github.com/matrix-org/synapse/issues/14529), [\#14452](https://github.com/matrix-org/synapse/issues/14452)). | |
60 | - Remove old stream ID tracking code. Contributed by Nick @Beeper (@fizzadar). ([\#14376](https://github.com/matrix-org/synapse/issues/14376), [\#14468](https://github.com/matrix-org/synapse/issues/14468)) | |
61 | - Remove the `worker_main_http_uri` configuration setting. This is now handled via internal replication. ([\#14400](https://github.com/matrix-org/synapse/issues/14400), [\#14476](https://github.com/matrix-org/synapse/issues/14476)) | |
62 | - Refactor `federation_sender` and `pusher` configuration loading. ([\#14496](https://github.com/matrix-org/synapse/issues/14496)) | |
63 | ([\#14509](https://github.com/matrix-org/synapse/issues/14509), [\#14573](https://github.com/matrix-org/synapse/issues/14573)) | |
64 | - Faster joins: do not wait for full state when creating events to send. ([\#14403](https://github.com/matrix-org/synapse/issues/14403)) | |
65 | - Faster joins: filter out non local events when a room doesn't have its full state. ([\#14404](https://github.com/matrix-org/synapse/issues/14404)) | |
66 | - Faster joins: send events to initial list of servers if we don't have the full state yet. ([\#14408](https://github.com/matrix-org/synapse/issues/14408)) | |
67 | - Faster joins: use servers list approximation received during `send_join` (potentially updated with received membership events) in `assert_host_in_room`. ([\#14515](https://github.com/matrix-org/synapse/issues/14515)) | |
68 | - Fix type logic in TCP replication code that prevented correctly ignoring blank commands. ([\#14449](https://github.com/matrix-org/synapse/issues/14449)) | |
69 | - Remove option to skip locking of tables when performing emulated upserts, to avoid a class of bugs in future. ([\#14469](https://github.com/matrix-org/synapse/issues/14469)) | |
70 | - `scripts-dev/federation_client`: Fix routing on servers with `.well-known` files. ([\#14479](https://github.com/matrix-org/synapse/issues/14479)) | |
71 | - Reduce default third party invite rate limit to 216 invites per day. ([\#14487](https://github.com/matrix-org/synapse/issues/14487)) | |
72 | - Refactor conversion of device list changes in room to outbound pokes to track unconverted rows using a `(stream ID, room ID)` position instead of updating the `converted_to_destinations` flag on every row. ([\#14516](https://github.com/matrix-org/synapse/issues/14516)) | |
73 | - Add more prompts to the bug report form. ([\#14522](https://github.com/matrix-org/synapse/issues/14522)) | |
74 | - Extend editorconfig rules on indent and line length to `.pyi` files. ([\#14526](https://github.com/matrix-org/synapse/issues/14526)) | |
75 | - Run Rust CI when `Cargo.lock` changes. This is particularly useful for dependabot updates. ([\#14571](https://github.com/matrix-org/synapse/issues/14571)) | |
76 | - Fix a possible variable shadow in `create_new_client_event`. ([\#14575](https://github.com/matrix-org/synapse/issues/14575)) | |
77 | - Bump various dependencies in the `poetry.lock` file and in CI scripts. ([\#14557](https://github.com/matrix-org/synapse/issues/14557), [\#14559](https://github.com/matrix-org/synapse/issues/14559), [\#14560](https://github.com/matrix-org/synapse/issues/14560), [\#14500](https://github.com/matrix-org/synapse/issues/14500), [\#14501](https://github.com/matrix-org/synapse/issues/14501), [\#14502](https://github.com/matrix-org/synapse/issues/14502), [\#14503](https://github.com/matrix-org/synapse/issues/14503), [\#14504](https://github.com/matrix-org/synapse/issues/14504), [\#14505](https://github.com/matrix-org/synapse/issues/14505)). | |
78 | ||
79 | ||
0 | 80 | Synapse 1.72.0 (2022-11-22) |
1 | 81 | =========================== |
2 | 82 |
322 | 322 | |
323 | 323 | [[package]] |
324 | 324 | name = "serde" |
325 | version = "1.0.147" | |
326 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
327 | checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" | |
325 | version = "1.0.148" | |
326 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
327 | checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" | |
328 | 328 | dependencies = [ |
329 | 329 | "serde_derive", |
330 | 330 | ] |
331 | 331 | |
332 | 332 | [[package]] |
333 | 333 | name = "serde_derive" |
334 | version = "1.0.147" | |
335 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
336 | checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" | |
334 | version = "1.0.148" | |
335 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
336 | checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" | |
337 | 337 | dependencies = [ |
338 | 338 | "proc-macro2", |
339 | 339 | "quote", |
342 | 342 | |
343 | 343 | [[package]] |
344 | 344 | name = "serde_json" |
345 | version = "1.0.87" | |
346 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
347 | checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" | |
345 | version = "1.0.89" | |
346 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
347 | checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" | |
348 | 348 | dependencies = [ |
349 | 349 | "itoa", |
350 | 350 | "ryu", |
365 | 365 | |
366 | 366 | [[package]] |
367 | 367 | name = "syn" |
368 | version = "1.0.102" | |
369 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
370 | checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" | |
368 | version = "1.0.104" | |
369 | source = "registry+https://github.com/rust-lang/crates.io-index" | |
370 | checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce" | |
371 | 371 | dependencies = [ |
372 | 372 | "proc-macro2", |
373 | 373 | "quote", |
99 | 99 | # client-side support for partial state in /send_join responses |
100 | 100 | faster_joins: true |
101 | 101 | {% endif %} |
102 | # Enable jump to date endpoint | |
103 | msc3030_enabled: true | |
104 | 102 | # Filtering /messages by relation type. |
105 | 103 | msc3874_enabled: true |
106 | 104 |
139 | 139 | "^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event", |
140 | 140 | "^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms", |
141 | 141 | "^/_matrix/client/(api/v1|r0|v3|unstable/.*)/rooms/.*/aliases", |
142 | "^/_matrix/client/v1/rooms/.*/timestamp_to_event$", | |
142 | 143 | "^/_matrix/client/(api/v1|r0|v3|unstable)/search", |
143 | 144 | ], |
144 | 145 | "shared_extra_conf": {}, |
162 | 163 | "^/_matrix/federation/(v1|v2)/invite/", |
163 | 164 | "^/_matrix/federation/(v1|v2)/query_auth/", |
164 | 165 | "^/_matrix/federation/(v1|v2)/event_auth/", |
166 | "^/_matrix/federation/v1/timestamp_to_event/", | |
165 | 167 | "^/_matrix/federation/(v1|v2)/exchange_third_party_invite/", |
166 | 168 | "^/_matrix/federation/(v1|v2)/user/devices/", |
167 | 169 | "^/_matrix/federation/(v1|v2)/get_groups_publicised$", |
212 | 214 | "listener_resources": ["client", "replication"], |
213 | 215 | "endpoint_patterns": ["^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload"], |
214 | 216 | "shared_extra_conf": {}, |
215 | "worker_extra_conf": ( | |
216 | "worker_main_http_uri: http://127.0.0.1:%d" | |
217 | % (MAIN_PROCESS_HTTP_LISTENER_PORT,) | |
218 | ), | |
217 | "worker_extra_conf": "", | |
219 | 218 | }, |
220 | 219 | "account_data": { |
221 | 220 | "app": "synapse.app.generic_worker", |
86 | 86 | wget https://packages.matrix.org/debian/pool/main/m/matrix-synapse-py3/matrix-synapse-py3_1.3.0+stretch1_amd64.deb |
87 | 87 | dpkg -i matrix-synapse-py3_1.3.0+stretch1_amd64.deb |
88 | 88 | ``` |
89 | ||
90 | # Upgrading to v1.73.0 | |
91 | ||
92 | ## Legacy Prometheus metric names have now been removed | |
93 | ||
94 | Synapse v1.69.0 included the deprecation of legacy Prometheus metric names | |
95 | and offered an option to disable them. | |
96 | Synapse v1.71.0 disabled legacy Prometheus metric names by default. | |
97 | ||
98 | This version, v1.73.0, removes those legacy Prometheus metric names entirely. | |
99 | This also means that the `enable_legacy_metrics` configuration option has been | |
100 | removed; it will no longer be possible to re-enable the legacy metric names. | |
101 | ||
102 | If you use metrics and have not yet updated your Grafana dashboard(s), | |
103 | Prometheus console(s) or alerting rule(s), please consider doing so when upgrading | |
104 | to this version. | |
105 | Note that the included Grafana dashboard was updated in v1.72.0 to correct some | |
106 | metric names which were missed when legacy metrics were disabled by default. | |
107 | ||
108 | See [v1.69.0: Deprecation of legacy Prometheus metric names](#deprecation-of-legacy-prometheus-metric-names) | |
109 | for more context. | |
110 | ||
89 | 111 | |
90 | 112 | # Upgrading to v1.72.0 |
91 | 113 |
18 | 18 | Finding your user's `access_token` is client-dependent, but will usually be shown in the client's settings. |
19 | 19 | |
20 | 20 | ## Making an Admin API request |
21 | For security reasons, we [recommend](reverse_proxy.md#synapse-administration-endpoints) | |
21 | For security reasons, we [recommend](../../../reverse_proxy.md#synapse-administration-endpoints) | |
22 | 22 | that the Admin API (`/_synapse/admin/...`) should be hidden from public view using a |
23 | 23 | reverse proxy. This means you should typically query the Admin API from a terminal on |
24 | 24 | the machine which runs Synapse. |
2435 | 2435 | ```yaml |
2436 | 2436 | enable_metrics: true |
2437 | 2437 | ``` |
2438 | --- | |
2439 | ### `enable_legacy_metrics` | |
2440 | ||
2441 | Set to `true` to publish both legacy and non-legacy Prometheus metric names, | |
2442 | or to `false` to only publish non-legacy Prometheus metric names. | |
2443 | Defaults to `false`. Has no effect if `enable_metrics` is `false`. | |
2444 | **In Synapse v1.67.0 up to and including Synapse v1.70.1, this defaulted to `true`.** | |
2445 | ||
2446 | Legacy metric names include: | |
2447 | - metrics containing colons in the name, such as `synapse_util_caches_response_cache:hits`, because colons are supposed to be reserved for user-defined recording rules; | |
2448 | - counters that don't end with the `_total` suffix, such as `synapse_federation_client_sent_edus`, therefore not adhering to the OpenMetrics standard. | |
2449 | ||
2450 | These legacy metric names are unconventional and not compliant with OpenMetrics standards. | |
2451 | They are included for backwards compatibility. | |
2452 | ||
2453 | Example configuration: | |
2454 | ```yaml | |
2455 | enable_legacy_metrics: false | |
2456 | ``` | |
2457 | ||
2458 | See https://github.com/matrix-org/synapse/issues/11106 for context. | |
2459 | ||
2460 | *Since v1.67.0.* | |
2461 | ||
2462 | **Will be removed in v1.73.0.** | |
2463 | 2438 | --- |
2464 | 2439 | ### `sentry` |
2465 | 2440 | |
2992 | 2967 | |
2993 | 2968 | For the default provider, the following settings are available: |
2994 | 2969 | |
2995 | * subject_claim: name of the claim containing a unique identifier | |
2970 | * `subject_claim`: name of the claim containing a unique identifier | |
2996 | 2971 | for the user. Defaults to 'sub', which OpenID Connect |
2997 | 2972 | compliant providers should provide. |
2973 | ||
2974 | * `picture_claim`: name of the claim containing an url for the user's profile picture. | |
2975 | Defaults to 'picture', which OpenID Connect compliant providers should provide | |
2976 | and has to refer to a direct image file such as PNG, JPEG, or GIF image file. | |
2977 | ||
2978 | Currently only supported in monolithic (single-process) server configurations | |
2979 | where the media repository runs within the Synapse process. | |
2998 | 2980 | |
2999 | 2981 | * `localpart_template`: Jinja2 template for the localpart of the MXID. |
3000 | 2982 | If this is not set, the user will be prompted to choose their |
134 | 134 | [`worker_replication_http_port`](usage/configuration/config_documentation.md#worker_replication_http_port)). |
135 | 135 | * If handling HTTP requests, a [`worker_listeners`](usage/configuration/config_documentation.md#worker_listeners) option |
136 | 136 | with an `http` listener. |
137 | * If handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for | |
138 | the main process (`worker_main_http_uri`). | |
137 | * **Synapse 1.72 and older:** if handling the `^/_matrix/client/v3/keys/upload` endpoint, the HTTP URI for | |
138 | the main process (`worker_main_http_uri`). This config option is no longer required and is ignored when running Synapse 1.73 and newer. | |
139 | 139 | |
140 | 140 | For example: |
141 | 141 | |
190 | 190 | ^/_matrix/federation/(v1|v2)/send_leave/ |
191 | 191 | ^/_matrix/federation/(v1|v2)/invite/ |
192 | 192 | ^/_matrix/federation/v1/event_auth/ |
193 | ^/_matrix/federation/v1/timestamp_to_event/ | |
193 | 194 | ^/_matrix/federation/v1/exchange_third_party_invite/ |
194 | 195 | ^/_matrix/federation/v1/user/devices/ |
195 | 196 | ^/_matrix/key/v2/query |
217 | 218 | ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$ |
218 | 219 | ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/ |
219 | 220 | ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$ |
221 | ^/_matrix/client/v1/rooms/.*/timestamp_to_event$ | |
220 | 222 | ^/_matrix/client/(api/v1|r0|v3|unstable)/search$ |
221 | 223 | |
222 | 224 | # Encryption requests |
223 | # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri` | |
224 | 225 | ^/_matrix/client/(r0|v3|unstable)/keys/query$ |
225 | 226 | ^/_matrix/client/(r0|v3|unstable)/keys/changes$ |
226 | 227 | ^/_matrix/client/(r0|v3|unstable)/keys/claim$ |
375 | 376 | - persisting them to the DB, and finally |
376 | 377 | - updating the events stream. |
377 | 378 | |
378 | Because load is sharded in this way, you *must* restart all worker instances when | |
379 | Because load is sharded in this way, you *must* restart all worker instances when | |
379 | 380 | adding or removing event persisters. |
380 | 381 | |
381 | 382 | An `event_persister` should not be mistaken for an `event_creator`. |
10 | 10 | local_partial_types = True |
11 | 11 | no_implicit_optional = True |
12 | 12 | disallow_untyped_defs = True |
13 | strict_equality = True | |
13 | 14 | |
14 | 15 | files = |
15 | 16 | docker/, |
57 | 58 | |tests/server_notices/test_resource_limits_server_notices.py |
58 | 59 | |tests/test_state.py |
59 | 60 | |tests/test_terms_auth.py |
60 | |tests/util/caches/test_cached_call.py | |
61 | |tests/util/caches/test_deferred_cache.py | |
62 | |tests/util/caches/test_descriptors.py | |
63 | |tests/util/caches/test_response_cache.py | |
64 | |tests/util/caches/test_ttlcache.py | |
65 | 61 | |tests/util/test_async_helpers.py |
66 | 62 | |tests/util/test_batching_queue.py |
67 | 63 | |tests/util/test_dict_cache.py |
116 | 112 | [mypy-tests.state.test_profile] |
117 | 113 | disallow_untyped_defs = True |
118 | 114 | |
115 | [mypy-tests.storage.test_id_generators] | |
116 | disallow_untyped_defs = True | |
117 | ||
119 | 118 | [mypy-tests.storage.test_profile] |
119 | disallow_untyped_defs = True | |
120 | ||
121 | [mypy-tests.handlers.test_sso] | |
120 | 122 | disallow_untyped_defs = True |
121 | 123 | |
122 | 124 | [mypy-tests.storage.test_user_directory] |
128 | 130 | [mypy-tests.federation.transport.test_client] |
129 | 131 | disallow_untyped_defs = True |
130 | 132 | |
133 | [mypy-tests.util.caches.*] | |
134 | disallow_untyped_defs = True | |
135 | ||
136 | [mypy-tests.util.caches.test_descriptors] | |
137 | disallow_untyped_defs = False | |
138 | ||
131 | 139 | [mypy-tests.utils] |
132 | 140 | disallow_untyped_defs = True |
133 | ||
134 | 141 | |
135 | 142 | ;; Dependencies without annotations |
136 | 143 | ;; Before ignoring a module, check to see if type stubs are available. |
662 | 662 | |
663 | 663 | [[package]] |
664 | 664 | name = "phonenumbers" |
665 | version = "8.12.56" | |
665 | version = "8.13.0" | |
666 | 666 | description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." |
667 | 667 | category = "main" |
668 | 668 | optional = false |
813 | 813 | |
814 | 814 | [[package]] |
815 | 815 | name = "pygithub" |
816 | version = "1.56" | |
816 | version = "1.57" | |
817 | 817 | description = "Use the full Github API v3" |
818 | 818 | category = "dev" |
819 | 819 | optional = false |
820 | python-versions = ">=3.6" | |
820 | python-versions = ">=3.7" | |
821 | 821 | |
822 | 822 | [package.dependencies] |
823 | 823 | deprecated = "*" |
824 | pyjwt = ">=2.0" | |
824 | pyjwt = ">=2.4.0" | |
825 | 825 | pynacl = ">=1.4.0" |
826 | 826 | requests = ">=2.14.0" |
827 | 827 | |
1075 | 1075 | |
1076 | 1076 | [[package]] |
1077 | 1077 | name = "sentry-sdk" |
1078 | version = "1.10.1" | |
1078 | version = "1.11.0" | |
1079 | 1079 | description = "Python client for Sentry (https://sentry.io)" |
1080 | 1080 | category = "main" |
1081 | 1081 | optional = true |
1097 | 1097 | flask = ["blinker (>=1.1)", "flask (>=0.11)"] |
1098 | 1098 | httpx = ["httpx (>=0.16.0)"] |
1099 | 1099 | pure-eval = ["asttokens", "executing", "pure-eval"] |
1100 | pymongo = ["pymongo (>=3.1)"] | |
1100 | 1101 | pyspark = ["pyspark (>=2.4.4)"] |
1101 | 1102 | quart = ["blinker (>=1.1)", "quart (>=0.16.1)"] |
1102 | 1103 | rq = ["rq (>=0.6)"] |
1255 | 1256 | |
1256 | 1257 | [[package]] |
1257 | 1258 | name = "towncrier" |
1258 | version = "21.9.0" | |
1259 | version = "22.8.0" | |
1259 | 1260 | description = "Building newsfiles for your project." |
1260 | 1261 | category = "dev" |
1261 | 1262 | optional = false |
1262 | python-versions = "*" | |
1263 | python-versions = ">=3.7" | |
1263 | 1264 | |
1264 | 1265 | [package.dependencies] |
1265 | 1266 | click = "*" |
1267 | 1268 | incremental = "*" |
1268 | 1269 | jinja2 = "*" |
1269 | 1270 | setuptools = "*" |
1270 | tomli = {version = "*", markers = "python_version >= \"3.6\""} | |
1271 | tomli = "*" | |
1271 | 1272 | |
1272 | 1273 | [package.extras] |
1273 | 1274 | dev = ["packaging"] |
1438 | 1439 | |
1439 | 1440 | [[package]] |
1440 | 1441 | name = "types-pillow" |
1441 | version = "9.2.2.1" | |
1442 | version = "9.3.0.1" | |
1442 | 1443 | description = "Typing stubs for Pillow" |
1443 | 1444 | category = "dev" |
1444 | 1445 | optional = false |
2256 | 2257 | {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"}, |
2257 | 2258 | ] |
2258 | 2259 | phonenumbers = [ |
2259 | {file = "phonenumbers-8.12.56-py2.py3-none-any.whl", hash = "sha256:80a7422cf0999a6f9b7a2e6cfbdbbfcc56ab5b75414dc3b805bbec91276b64a3"}, | |
2260 | {file = "phonenumbers-8.12.56.tar.gz", hash = "sha256:82a4f226c930d02dcdf6d4b29e4cfd8678991fe65c2efd5fdd143557186f0868"}, | |
2260 | {file = "phonenumbers-8.13.0-py2.py3-none-any.whl", hash = "sha256:dbaea9e4005a976bcf18fbe2bb87cb9cd0a3f119136f04188ac412d7741cebf0"}, | |
2261 | {file = "phonenumbers-8.13.0.tar.gz", hash = "sha256:93745d7afd38e246660bb601b07deac54eeb76c8e5e43f5e83333b0383a0a1e4"}, | |
2261 | 2262 | ] |
2262 | 2263 | pillow = [ |
2263 | 2264 | {file = "Pillow-9.3.0-1-cp37-cp37m-win32.whl", hash = "sha256:e6ea6b856a74d560d9326c0f5895ef8050126acfdc7ca08ad703eb0081e82b74"}, |
2418 | 2419 | {file = "pyflakes-2.5.0.tar.gz", hash = "sha256:491feb020dca48ccc562a8c0cbe8df07ee13078df59813b83959cbdada312ea3"}, |
2419 | 2420 | ] |
2420 | 2421 | pygithub = [ |
2421 | {file = "PyGithub-1.56-py3-none-any.whl", hash = "sha256:d15f13d82165306da8a68aefc0f848a6f6432d5febbff13b60a94758ce3ef8b5"}, | |
2422 | {file = "PyGithub-1.56.tar.gz", hash = "sha256:80c6d85cf0f9418ffeb840fd105840af694c4f17e102970badbaf678251f2a01"}, | |
2422 | {file = "PyGithub-1.57-py3-none-any.whl", hash = "sha256:5822febeac2391f1306c55a99af2bc8f86c8bf82ded000030cd02c18f31b731f"}, | |
2423 | {file = "PyGithub-1.57.tar.gz", hash = "sha256:c273f252b278fb81f1769505cc6921bdb6791e1cebd6ac850cc97dad13c31ff3"}, | |
2423 | 2424 | ] |
2424 | 2425 | pygments = [ |
2425 | 2426 | {file = "Pygments-2.11.2-py3-none-any.whl", hash = "sha256:44238f1b60a76d78fc8ca0528ee429702aae011c265fe6a8dd8b63049ae41c65"}, |
2567 | 2568 | {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, |
2568 | 2569 | ] |
2569 | 2570 | sentry-sdk = [ |
2570 | {file = "sentry-sdk-1.10.1.tar.gz", hash = "sha256:105faf7bd7b7fa25653404619ee261527266b14103fe1389e0ce077bd23a9691"}, | |
2571 | {file = "sentry_sdk-1.10.1-py2.py3-none-any.whl", hash = "sha256:06c0fa9ccfdc80d7e3b5d2021978d6eb9351fa49db9b5847cf4d1f2a473414ad"}, | |
2571 | {file = "sentry-sdk-1.11.0.tar.gz", hash = "sha256:e7b78a1ddf97a5f715a50ab8c3f7a93f78b114c67307785ee828ef67a5d6f117"}, | |
2572 | {file = "sentry_sdk-1.11.0-py2.py3-none-any.whl", hash = "sha256:f467e6c7fac23d4d42bc83eb049c400f756cd2d65ab44f0cc1165d0c7c3d40bc"}, | |
2572 | 2573 | ] |
2573 | 2574 | service-identity = [ |
2574 | 2575 | {file = "service-identity-21.1.0.tar.gz", hash = "sha256:6e6c6086ca271dc11b033d17c3a8bea9f24ebff920c587da090afc9519419d34"}, |
2719 | 2720 | {file = "tornado-6.1.tar.gz", hash = "sha256:33c6e81d7bd55b468d2e793517c909b139960b6c790a60b7991b9b6b76fb9791"}, |
2720 | 2721 | ] |
2721 | 2722 | towncrier = [ |
2722 | {file = "towncrier-21.9.0-py2.py3-none-any.whl", hash = "sha256:fc5a88a2a54988e3a8ed2b60d553599da8330f65722cc607c839614ed87e0f92"}, | |
2723 | {file = "towncrier-21.9.0.tar.gz", hash = "sha256:9cb6f45c16e1a1eec9d0e7651165e7be60cd0ab81d13a5c96ca97a498ae87f48"}, | |
2723 | {file = "towncrier-22.8.0-py2.py3-none-any.whl", hash = "sha256:3b780c3d966e1b26414830aec3d15000654b31e64e024f3e5fd128b4c6eb8f47"}, | |
2724 | {file = "towncrier-22.8.0.tar.gz", hash = "sha256:7d3839b033859b45fb55df82b74cfd702431933c0cc9f287a5a7ea3e05d042cb"}, | |
2724 | 2725 | ] |
2725 | 2726 | treq = [ |
2726 | 2727 | {file = "treq-22.2.0-py3-none-any.whl", hash = "sha256:27d95b07c5c14be3e7b280416139b036087617ad5595be913b1f9b3ce981b9b2"}, |
2807 | 2808 | {file = "types_opentracing-2.4.10-py3-none-any.whl", hash = "sha256:66d9cfbbdc4a6f8ca8189a15ad26f0fe41cee84c07057759c5d194e2505b84c2"}, |
2808 | 2809 | ] |
2809 | 2810 | types-pillow = [ |
2810 | {file = "types-Pillow-9.2.2.1.tar.gz", hash = "sha256:85c139e06e1c46ec5f9c634d5c54a156b0958d5d0e8be024ed353db0c804b426"}, | |
2811 | {file = "types_Pillow-9.2.2.1-py3-none-any.whl", hash = "sha256:3a6a871cade8428433a21ef459bb0a65532b87d05f9e836a0664431ce445bdcf"}, | |
2811 | {file = "types-Pillow-9.3.0.1.tar.gz", hash = "sha256:f3b7cada3fa496c78d75253c6b1f07a843d625f42e5639b320a72acaff6f7cfb"}, | |
2812 | {file = "types_Pillow-9.3.0.1-py3-none-any.whl", hash = "sha256:79837755fe9659f29efd1016e9903ac4a500e0c73260483f07296bd6ca47668b"}, | |
2812 | 2813 | ] |
2813 | 2814 | types-psycopg2 = [ |
2814 | 2815 | {file = "types-psycopg2-2.9.21.1.tar.gz", hash = "sha256:f5532cf15afdc6b5ebb1e59b7d896617217321f488fd1fbd74e7efb94decfab6"}, |
56 | 56 | |
57 | 57 | [tool.poetry] |
58 | 58 | name = "matrix-synapse" |
59 | version = "1.72.0" | |
59 | version = "1.73.0" | |
60 | 60 | description = "Homeserver for the Matrix decentralised comms protocol" |
61 | 61 | authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] |
62 | 62 | license = "Apache-2.0" |
274 | 274 | default_enabled: true, |
275 | 275 | }, |
276 | 276 | PushRule { |
277 | rule_id: Cow::Borrowed( | |
278 | "global/underride/.org.matrix.msc3933.rule.extensible.encrypted_room_one_to_one", | |
279 | ), | |
280 | priority_class: 1, | |
281 | conditions: Cow::Borrowed(&[ | |
282 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
283 | key: Cow::Borrowed("type"), | |
284 | // MSC3933: Type changed from template rule - see MSC. | |
285 | pattern: Some(Cow::Borrowed("org.matrix.msc1767.encrypted")), | |
286 | pattern_type: None, | |
287 | })), | |
288 | Condition::Known(KnownCondition::RoomMemberCount { | |
289 | is: Some(Cow::Borrowed("2")), | |
290 | }), | |
291 | // MSC3933: Add condition on top of template rule - see MSC. | |
292 | Condition::Known(KnownCondition::RoomVersionSupports { | |
293 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
294 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
295 | }), | |
296 | ]), | |
297 | actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), | |
298 | default: true, | |
299 | default_enabled: true, | |
300 | }, | |
301 | PushRule { | |
302 | rule_id: Cow::Borrowed( | |
303 | "global/underride/.org.matrix.msc3933.rule.extensible.message.room_one_to_one", | |
304 | ), | |
305 | priority_class: 1, | |
306 | conditions: Cow::Borrowed(&[ | |
307 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
308 | key: Cow::Borrowed("type"), | |
309 | // MSC3933: Type changed from template rule - see MSC. | |
310 | pattern: Some(Cow::Borrowed("org.matrix.msc1767.message")), | |
311 | pattern_type: None, | |
312 | })), | |
313 | Condition::Known(KnownCondition::RoomMemberCount { | |
314 | is: Some(Cow::Borrowed("2")), | |
315 | }), | |
316 | // MSC3933: Add condition on top of template rule - see MSC. | |
317 | Condition::Known(KnownCondition::RoomVersionSupports { | |
318 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
319 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
320 | }), | |
321 | ]), | |
322 | actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), | |
323 | default: true, | |
324 | default_enabled: true, | |
325 | }, | |
326 | PushRule { | |
327 | rule_id: Cow::Borrowed( | |
328 | "global/underride/.org.matrix.msc3933.rule.extensible.file.room_one_to_one", | |
329 | ), | |
330 | priority_class: 1, | |
331 | conditions: Cow::Borrowed(&[ | |
332 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
333 | key: Cow::Borrowed("type"), | |
334 | // MSC3933: Type changed from template rule - see MSC. | |
335 | pattern: Some(Cow::Borrowed("org.matrix.msc1767.file")), | |
336 | pattern_type: None, | |
337 | })), | |
338 | Condition::Known(KnownCondition::RoomMemberCount { | |
339 | is: Some(Cow::Borrowed("2")), | |
340 | }), | |
341 | // MSC3933: Add condition on top of template rule - see MSC. | |
342 | Condition::Known(KnownCondition::RoomVersionSupports { | |
343 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
344 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
345 | }), | |
346 | ]), | |
347 | actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), | |
348 | default: true, | |
349 | default_enabled: true, | |
350 | }, | |
351 | PushRule { | |
352 | rule_id: Cow::Borrowed( | |
353 | "global/underride/.org.matrix.msc3933.rule.extensible.image.room_one_to_one", | |
354 | ), | |
355 | priority_class: 1, | |
356 | conditions: Cow::Borrowed(&[ | |
357 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
358 | key: Cow::Borrowed("type"), | |
359 | // MSC3933: Type changed from template rule - see MSC. | |
360 | pattern: Some(Cow::Borrowed("org.matrix.msc1767.image")), | |
361 | pattern_type: None, | |
362 | })), | |
363 | Condition::Known(KnownCondition::RoomMemberCount { | |
364 | is: Some(Cow::Borrowed("2")), | |
365 | }), | |
366 | // MSC3933: Add condition on top of template rule - see MSC. | |
367 | Condition::Known(KnownCondition::RoomVersionSupports { | |
368 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
369 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
370 | }), | |
371 | ]), | |
372 | actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), | |
373 | default: true, | |
374 | default_enabled: true, | |
375 | }, | |
376 | PushRule { | |
377 | rule_id: Cow::Borrowed( | |
378 | "global/underride/.org.matrix.msc3933.rule.extensible.video.room_one_to_one", | |
379 | ), | |
380 | priority_class: 1, | |
381 | conditions: Cow::Borrowed(&[ | |
382 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
383 | key: Cow::Borrowed("type"), | |
384 | // MSC3933: Type changed from template rule - see MSC. | |
385 | pattern: Some(Cow::Borrowed("org.matrix.msc1767.video")), | |
386 | pattern_type: None, | |
387 | })), | |
388 | Condition::Known(KnownCondition::RoomMemberCount { | |
389 | is: Some(Cow::Borrowed("2")), | |
390 | }), | |
391 | // MSC3933: Add condition on top of template rule - see MSC. | |
392 | Condition::Known(KnownCondition::RoomVersionSupports { | |
393 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
394 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
395 | }), | |
396 | ]), | |
397 | actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), | |
398 | default: true, | |
399 | default_enabled: true, | |
400 | }, | |
401 | PushRule { | |
402 | rule_id: Cow::Borrowed( | |
403 | "global/underride/.org.matrix.msc3933.rule.extensible.audio.room_one_to_one", | |
404 | ), | |
405 | priority_class: 1, | |
406 | conditions: Cow::Borrowed(&[ | |
407 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
408 | key: Cow::Borrowed("type"), | |
409 | // MSC3933: Type changed from template rule - see MSC. | |
410 | pattern: Some(Cow::Borrowed("org.matrix.msc1767.audio")), | |
411 | pattern_type: None, | |
412 | })), | |
413 | Condition::Known(KnownCondition::RoomMemberCount { | |
414 | is: Some(Cow::Borrowed("2")), | |
415 | }), | |
416 | // MSC3933: Add condition on top of template rule - see MSC. | |
417 | Condition::Known(KnownCondition::RoomVersionSupports { | |
418 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
419 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
420 | }), | |
421 | ]), | |
422 | actions: Cow::Borrowed(&[Action::Notify, SOUND_ACTION, HIGHLIGHT_FALSE_ACTION]), | |
423 | default: true, | |
424 | default_enabled: true, | |
425 | }, | |
426 | PushRule { | |
277 | 427 | rule_id: Cow::Borrowed("global/underride/.m.rule.message"), |
278 | 428 | priority_class: 1, |
279 | 429 | conditions: Cow::Borrowed(&[Condition::Known(KnownCondition::EventMatch( |
297 | 447 | pattern_type: None, |
298 | 448 | }, |
299 | 449 | ))]), |
450 | actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), | |
451 | default: true, | |
452 | default_enabled: true, | |
453 | }, | |
454 | PushRule { | |
455 | rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.encrypted"), | |
456 | priority_class: 1, | |
457 | conditions: Cow::Borrowed(&[ | |
458 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
459 | key: Cow::Borrowed("type"), | |
460 | // MSC3933: Type changed from template rule - see MSC. | |
461 | pattern: Some(Cow::Borrowed("m.encrypted")), | |
462 | pattern_type: None, | |
463 | })), | |
464 | // MSC3933: Add condition on top of template rule - see MSC. | |
465 | Condition::Known(KnownCondition::RoomVersionSupports { | |
466 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
467 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
468 | }), | |
469 | ]), | |
470 | actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), | |
471 | default: true, | |
472 | default_enabled: true, | |
473 | }, | |
474 | PushRule { | |
475 | rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.message"), | |
476 | priority_class: 1, | |
477 | conditions: Cow::Borrowed(&[ | |
478 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
479 | key: Cow::Borrowed("type"), | |
480 | // MSC3933: Type changed from template rule - see MSC. | |
481 | pattern: Some(Cow::Borrowed("m.message")), | |
482 | pattern_type: None, | |
483 | })), | |
484 | // MSC3933: Add condition on top of template rule - see MSC. | |
485 | Condition::Known(KnownCondition::RoomVersionSupports { | |
486 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
487 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
488 | }), | |
489 | ]), | |
490 | actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), | |
491 | default: true, | |
492 | default_enabled: true, | |
493 | }, | |
494 | PushRule { | |
495 | rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.file"), | |
496 | priority_class: 1, | |
497 | conditions: Cow::Borrowed(&[ | |
498 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
499 | key: Cow::Borrowed("type"), | |
500 | // MSC3933: Type changed from template rule - see MSC. | |
501 | pattern: Some(Cow::Borrowed("m.file")), | |
502 | pattern_type: None, | |
503 | })), | |
504 | // MSC3933: Add condition on top of template rule - see MSC. | |
505 | Condition::Known(KnownCondition::RoomVersionSupports { | |
506 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
507 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
508 | }), | |
509 | ]), | |
510 | actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), | |
511 | default: true, | |
512 | default_enabled: true, | |
513 | }, | |
514 | PushRule { | |
515 | rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.image"), | |
516 | priority_class: 1, | |
517 | conditions: Cow::Borrowed(&[ | |
518 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
519 | key: Cow::Borrowed("type"), | |
520 | // MSC3933: Type changed from template rule - see MSC. | |
521 | pattern: Some(Cow::Borrowed("m.image")), | |
522 | pattern_type: None, | |
523 | })), | |
524 | // MSC3933: Add condition on top of template rule - see MSC. | |
525 | Condition::Known(KnownCondition::RoomVersionSupports { | |
526 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
527 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
528 | }), | |
529 | ]), | |
530 | actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), | |
531 | default: true, | |
532 | default_enabled: true, | |
533 | }, | |
534 | PushRule { | |
535 | rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.video"), | |
536 | priority_class: 1, | |
537 | conditions: Cow::Borrowed(&[ | |
538 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
539 | key: Cow::Borrowed("type"), | |
540 | // MSC3933: Type changed from template rule - see MSC. | |
541 | pattern: Some(Cow::Borrowed("m.video")), | |
542 | pattern_type: None, | |
543 | })), | |
544 | // MSC3933: Add condition on top of template rule - see MSC. | |
545 | Condition::Known(KnownCondition::RoomVersionSupports { | |
546 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
547 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
548 | }), | |
549 | ]), | |
550 | actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), | |
551 | default: true, | |
552 | default_enabled: true, | |
553 | }, | |
554 | PushRule { | |
555 | rule_id: Cow::Borrowed("global/underride/.org.matrix.msc1767.rule.extensible.audio"), | |
556 | priority_class: 1, | |
557 | conditions: Cow::Borrowed(&[ | |
558 | Condition::Known(KnownCondition::EventMatch(EventMatchCondition { | |
559 | key: Cow::Borrowed("type"), | |
560 | // MSC3933: Type changed from template rule - see MSC. | |
561 | pattern: Some(Cow::Borrowed("m.audio")), | |
562 | pattern_type: None, | |
563 | })), | |
564 | // MSC3933: Add condition on top of template rule - see MSC. | |
565 | Condition::Known(KnownCondition::RoomVersionSupports { | |
566 | // RoomVersionFeatures::ExtensibleEvents.as_str(), ideally | |
567 | feature: Cow::Borrowed("org.matrix.msc3932.extensible_events"), | |
568 | }), | |
569 | ]), | |
300 | 570 | actions: Cow::Borrowed(&[Action::Notify, HIGHLIGHT_FALSE_ACTION]), |
301 | 571 | default: true, |
302 | 572 | default_enabled: true, |
11 | 11 | // See the License for the specific language governing permissions and |
12 | 12 | // limitations under the License. |
13 | 13 | |
14 | use std::borrow::Cow; | |
14 | 15 | use std::collections::BTreeMap; |
15 | 16 | |
17 | use crate::push::{PushRule, PushRules}; | |
16 | 18 | use anyhow::{Context, Error}; |
17 | 19 | use lazy_static::lazy_static; |
18 | 20 | use log::warn; |
28 | 30 | lazy_static! { |
29 | 31 | /// Used to parse the `is` clause in the room member count condition. |
30 | 32 | static ref INEQUALITY_EXPR: Regex = Regex::new(r"^([=<>]*)([0-9]+)$").expect("valid regex"); |
33 | ||
34 | /// Used to determine which MSC3931 room version feature flags are actually known to | |
35 | /// the push evaluator. | |
36 | static ref KNOWN_RVER_FLAGS: Vec<String> = vec![ | |
37 | RoomVersionFeatures::ExtensibleEvents.as_str().to_string(), | |
38 | ]; | |
39 | ||
40 | /// The "safe" rule IDs which are not affected by MSC3932's behaviour (room versions which | |
41 | /// declare Extensible Events support ultimately *disable* push rules which do not declare | |
42 | /// *any* MSC3931 room_version_supports condition). | |
43 | static ref SAFE_EXTENSIBLE_EVENTS_RULE_IDS: Vec<String> = vec![ | |
44 | "global/override/.m.rule.master".to_string(), | |
45 | "global/override/.m.rule.roomnotif".to_string(), | |
46 | "global/content/.m.rule.contains_user_name".to_string(), | |
47 | ]; | |
48 | } | |
49 | ||
50 | enum RoomVersionFeatures { | |
51 | ExtensibleEvents, | |
52 | } | |
53 | ||
54 | impl RoomVersionFeatures { | |
55 | fn as_str(&self) -> &'static str { | |
56 | match self { | |
57 | RoomVersionFeatures::ExtensibleEvents => "org.matrix.msc3932.extensible_events", | |
58 | } | |
59 | } | |
31 | 60 | } |
32 | 61 | |
33 | 62 | /// Allows running a set of push rules against a particular event. |
56 | 85 | |
57 | 86 | /// If msc3664, push rules for related events, is enabled. |
58 | 87 | related_event_match_enabled: bool, |
88 | ||
89 | /// If MSC3931 is applicable, the feature flags for the room version. | |
90 | room_version_feature_flags: Vec<String>, | |
91 | ||
92 | /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same | |
93 | /// flag as MSC1767 (extensible events core). | |
94 | msc3931_enabled: bool, | |
59 | 95 | } |
60 | 96 | |
61 | 97 | #[pymethods] |
69 | 105 | notification_power_levels: BTreeMap<String, i64>, |
70 | 106 | related_events_flattened: BTreeMap<String, BTreeMap<String, String>>, |
71 | 107 | related_event_match_enabled: bool, |
108 | room_version_feature_flags: Vec<String>, | |
109 | msc3931_enabled: bool, | |
72 | 110 | ) -> Result<Self, Error> { |
73 | 111 | let body = flattened_keys |
74 | 112 | .get("content.body") |
83 | 121 | sender_power_level, |
84 | 122 | related_events_flattened, |
85 | 123 | related_event_match_enabled, |
124 | room_version_feature_flags, | |
125 | msc3931_enabled, | |
86 | 126 | }) |
87 | 127 | } |
88 | 128 | |
105 | 145 | continue; |
106 | 146 | } |
107 | 147 | |
148 | let rule_id = &push_rule.rule_id().to_string(); | |
149 | let extev_flag = &RoomVersionFeatures::ExtensibleEvents.as_str().to_string(); | |
150 | let supports_extensible_events = self.room_version_feature_flags.contains(extev_flag); | |
151 | let safe_from_rver_condition = SAFE_EXTENSIBLE_EVENTS_RULE_IDS.contains(rule_id); | |
152 | let mut has_rver_condition = false; | |
153 | ||
108 | 154 | for condition in push_rule.conditions.iter() { |
155 | has_rver_condition = has_rver_condition | |
156 | || match condition { | |
157 | Condition::Known(known) => match known { | |
158 | // per MSC3932, we just need *any* room version condition to match | |
159 | KnownCondition::RoomVersionSupports { feature: _ } => true, | |
160 | _ => false, | |
161 | }, | |
162 | _ => false, | |
163 | }; | |
109 | 164 | match self.match_condition(condition, user_id, display_name) { |
110 | 165 | Ok(true) => {} |
111 | 166 | Ok(false) => continue 'outer, |
114 | 169 | continue 'outer; |
115 | 170 | } |
116 | 171 | } |
172 | } | |
173 | ||
174 | // MSC3932: Disable push rules in extensible event-supporting room versions if they | |
175 | // don't describe *any* MSC3931 room version condition, unless the rule is on the | |
176 | // safe list. | |
177 | if !has_rver_condition && !safe_from_rver_condition && supports_extensible_events { | |
178 | continue; | |
117 | 179 | } |
118 | 180 | |
119 | 181 | let actions = push_rule |
201 | 263 | *sender_power_level >= required_level |
202 | 264 | } else { |
203 | 265 | false |
266 | } | |
267 | } | |
268 | KnownCondition::RoomVersionSupports { feature } => { | |
269 | if !self.msc3931_enabled { | |
270 | false | |
271 | } else { | |
272 | let flag = feature.to_string(); | |
273 | KNOWN_RVER_FLAGS.contains(&flag) | |
274 | && self.room_version_feature_flags.contains(&flag) | |
204 | 275 | } |
205 | 276 | } |
206 | 277 | }; |
361 | 432 | BTreeMap::new(), |
362 | 433 | BTreeMap::new(), |
363 | 434 | true, |
435 | vec![], | |
436 | true, | |
364 | 437 | ) |
365 | 438 | .unwrap(); |
366 | 439 | |
367 | 440 | let result = evaluator.run(&FilteredPushRules::default(), None, Some("bob")); |
368 | 441 | assert_eq!(result.len(), 3); |
369 | 442 | } |
443 | ||
444 | #[test] | |
445 | fn test_requires_room_version_supports_condition() { | |
446 | let mut flattened_keys = BTreeMap::new(); | |
447 | flattened_keys.insert("content.body".to_string(), "foo bar bob hello".to_string()); | |
448 | let flags = vec![RoomVersionFeatures::ExtensibleEvents.as_str().to_string()]; | |
449 | let evaluator = PushRuleEvaluator::py_new( | |
450 | flattened_keys, | |
451 | 10, | |
452 | Some(0), | |
453 | BTreeMap::new(), | |
454 | BTreeMap::new(), | |
455 | false, | |
456 | flags, | |
457 | true, | |
458 | ) | |
459 | .unwrap(); | |
460 | ||
461 | // first test: are the master and contains_user_name rules excluded from the "requires room | |
462 | // version condition" check? | |
463 | let mut result = evaluator.run( | |
464 | &FilteredPushRules::default(), | |
465 | Some("@bob:example.org"), | |
466 | None, | |
467 | ); | |
468 | assert_eq!(result.len(), 3); | |
469 | ||
470 | // second test: if an appropriate push rule is in play, does it get handled? | |
471 | let custom_rule = PushRule { | |
472 | rule_id: Cow::from("global/underride/.org.example.extensible"), | |
473 | priority_class: 1, // underride | |
474 | conditions: Cow::from(vec![Condition::Known( | |
475 | KnownCondition::RoomVersionSupports { | |
476 | feature: Cow::from(RoomVersionFeatures::ExtensibleEvents.as_str().to_string()), | |
477 | }, | |
478 | )]), | |
479 | actions: Cow::from(vec![Action::Notify]), | |
480 | default: false, | |
481 | default_enabled: true, | |
482 | }; | |
483 | let rules = PushRules::new(vec![custom_rule]); | |
484 | result = evaluator.run( | |
485 | &FilteredPushRules::py_new(rules, BTreeMap::new(), true, true), | |
486 | None, | |
487 | None, | |
488 | ); | |
489 | assert_eq!(result.len(), 1); | |
490 | } |
276 | 276 | SenderNotificationPermission { |
277 | 277 | key: Cow<'static, str>, |
278 | 278 | }, |
279 | #[serde(rename = "org.matrix.msc3931.room_version_supports")] | |
280 | RoomVersionSupports { | |
281 | feature: Cow<'static, str>, | |
282 | }, | |
279 | 283 | } |
280 | 284 | |
281 | 285 | impl IntoPy<PyObject> for Condition { |
407 | 411 | push_rules: PushRules, |
408 | 412 | enabled_map: BTreeMap<String, bool>, |
409 | 413 | msc3664_enabled: bool, |
414 | msc1767_enabled: bool, | |
410 | 415 | } |
411 | 416 | |
412 | 417 | #[pymethods] |
416 | 421 | push_rules: PushRules, |
417 | 422 | enabled_map: BTreeMap<String, bool>, |
418 | 423 | msc3664_enabled: bool, |
424 | msc1767_enabled: bool, | |
419 | 425 | ) -> Self { |
420 | 426 | Self { |
421 | 427 | push_rules, |
422 | 428 | enabled_map, |
423 | 429 | msc3664_enabled, |
430 | msc1767_enabled, | |
424 | 431 | } |
425 | 432 | } |
426 | 433 | |
445 | 452 | return false; |
446 | 453 | } |
447 | 454 | |
455 | if !self.msc1767_enabled && rule.rule_id.contains("org.matrix.msc1767") { | |
456 | return false; | |
457 | } | |
458 | ||
448 | 459 | true |
449 | 460 | }) |
450 | 461 | .map(|r| { |
491 | 502 | } |
492 | 503 | |
493 | 504 | #[test] |
505 | fn test_deserialize_unstable_msc3931_condition() { | |
506 | let json = | |
507 | r#"{"kind":"org.matrix.msc3931.room_version_supports","feature":"org.example.feature"}"#; | |
508 | ||
509 | let condition: Condition = serde_json::from_str(json).unwrap(); | |
510 | assert!(matches!( | |
511 | condition, | |
512 | Condition::Known(KnownCondition::RoomVersionSupports { feature: _ }) | |
513 | )); | |
514 | } | |
515 | ||
516 | #[test] | |
494 | 517 | fn test_deserialize_custom_condition() { |
495 | 518 | let json = r#"{"kind":"custom_tag"}"#; |
496 | 519 |
161 | 161 | # We only test faster room joins on monoliths, because they are purposefully |
162 | 162 | # being developed without worker support to start with. |
163 | 163 | # |
164 | # The tests for importing historical messages (MSC2716) and jump to date (MSC3030) | |
165 | # also only pass with monoliths, currently. | |
166 | test_tags="$test_tags,faster_joins,msc2716,msc3030" | |
164 | # The tests for importing historical messages (MSC2716) also only pass with monoliths, | |
165 | # currently. | |
166 | test_tags="$test_tags,faster_joins,msc2716" | |
167 | 167 | fi |
168 | 168 | |
169 | 169 |
45 | 45 | import signedjson.types |
46 | 46 | import srvlookup |
47 | 47 | import yaml |
48 | from requests import PreparedRequest, Response | |
48 | 49 | from requests.adapters import HTTPAdapter |
49 | 50 | from urllib3 import HTTPConnectionPool |
50 | 51 | |
51 | 52 | # uncomment the following to enable debug logging of http requests |
52 | # from httplib import HTTPConnection | |
53 | # from http.client import HTTPConnection | |
53 | 54 | # HTTPConnection.debuglevel = 1 |
54 | 55 | |
55 | 56 | |
102 | 103 | destination: str, |
103 | 104 | path: str, |
104 | 105 | content: Optional[str], |
106 | verify_tls: bool, | |
105 | 107 | ) -> requests.Response: |
106 | 108 | if method is None: |
107 | 109 | if content is None: |
140 | 142 | s.mount("matrix://", MatrixConnectionAdapter()) |
141 | 143 | |
142 | 144 | headers: Dict[str, str] = { |
143 | "Host": destination, | |
144 | 145 | "Authorization": authorization_headers[0], |
145 | 146 | } |
146 | 147 | |
151 | 152 | method=method, |
152 | 153 | url=dest, |
153 | 154 | headers=headers, |
154 | verify=False, | |
155 | verify=verify_tls, | |
155 | 156 | data=content, |
156 | 157 | stream=True, |
157 | 158 | ) |
200 | 201 | ) |
201 | 202 | |
202 | 203 | parser.add_argument("--body", help="Data to send as the body of the HTTP request") |
204 | ||
205 | parser.add_argument( | |
206 | "--insecure", | |
207 | action="store_true", | |
208 | help="Disable TLS certificate verification", | |
209 | ) | |
203 | 210 | |
204 | 211 | parser.add_argument( |
205 | 212 | "path", help="request path, including the '/_matrix/federation/...' prefix." |
226 | 233 | args.destination, |
227 | 234 | args.path, |
228 | 235 | content=args.body, |
236 | verify_tls=not args.insecure, | |
229 | 237 | ) |
230 | 238 | |
231 | 239 | sys.stderr.write("Status Code: %d\n" % (result.status_code,)) |
253 | 261 | |
254 | 262 | |
255 | 263 | class MatrixConnectionAdapter(HTTPAdapter): |
264 | def send( | |
265 | self, | |
266 | request: PreparedRequest, | |
267 | *args: Any, | |
268 | **kwargs: Any, | |
269 | ) -> Response: | |
270 | # overrides the send() method in the base class. | |
271 | ||
272 | # We need to look for .well-known redirects before passing the request up to | |
273 | # HTTPAdapter.send(). | |
274 | assert isinstance(request.url, str) | |
275 | parsed = urlparse.urlsplit(request.url) | |
276 | server_name = parsed.netloc | |
277 | well_known = self._get_well_known(parsed.netloc) | |
278 | ||
279 | if well_known: | |
280 | server_name = well_known | |
281 | ||
282 | # replace the scheme in the uri with https, so that cert verification is done | |
283 | # also replace the hostname if we got a .well-known result | |
284 | request.url = urlparse.urlunsplit( | |
285 | ("https", server_name, parsed.path, parsed.query, parsed.fragment) | |
286 | ) | |
287 | ||
288 | # at this point we also add the host header (otherwise urllib will add one | |
289 | # based on the `host` from the connection returned by `get_connection`, | |
290 | # which will be wrong if there is an SRV record). | |
291 | request.headers["Host"] = server_name | |
292 | ||
293 | return super().send(request, *args, **kwargs) | |
294 | ||
295 | def get_connection( | |
296 | self, url: str, proxies: Optional[Dict[str, str]] = None | |
297 | ) -> HTTPConnectionPool: | |
298 | # overrides the get_connection() method in the base class | |
299 | parsed = urlparse.urlsplit(url) | |
300 | (host, port, ssl_server_name) = self._lookup(parsed.netloc) | |
301 | print( | |
302 | f"Connecting to {host}:{port} with SNI {ssl_server_name}", file=sys.stderr | |
303 | ) | |
304 | return self.poolmanager.connection_from_host( | |
305 | host, | |
306 | port=port, | |
307 | scheme="https", | |
308 | pool_kwargs={"server_hostname": ssl_server_name}, | |
309 | ) | |
310 | ||
256 | 311 | @staticmethod |
257 | def lookup(s: str, skip_well_known: bool = False) -> Tuple[str, int]: | |
258 | if s[-1] == "]": | |
312 | def _lookup(server_name: str) -> Tuple[str, int, str]: | |
313 | """ | |
314 | Do an SRV lookup on a server name and return the host:port to connect to | |
315 | Given the server_name (after any .well-known lookup), return the host, port and | |
316 | the ssl server name | |
317 | """ | |
318 | if server_name[-1] == "]": | |
259 | 319 | # ipv6 literal (with no port) |
260 | return s, 8448 | |
261 | ||
262 | if ":" in s: | |
263 | out = s.rsplit(":", 1) | |
320 | return server_name, 8448, server_name | |
321 | ||
322 | if ":" in server_name: | |
323 | # explicit port | |
324 | out = server_name.rsplit(":", 1) | |
264 | 325 | try: |
265 | 326 | port = int(out[1]) |
266 | 327 | except ValueError: |
267 | raise ValueError("Invalid host:port '%s'" % s) | |
268 | return out[0], port | |
269 | ||
270 | # try a .well-known lookup | |
271 | if not skip_well_known: | |
272 | well_known = MatrixConnectionAdapter.get_well_known(s) | |
273 | if well_known: | |
274 | return MatrixConnectionAdapter.lookup(well_known, skip_well_known=True) | |
328 | raise ValueError("Invalid host:port '%s'" % (server_name,)) | |
329 | return out[0], port, out[0] | |
275 | 330 | |
276 | 331 | try: |
277 | srv = srvlookup.lookup("matrix", "tcp", s)[0] | |
278 | return srv.host, srv.port | |
332 | srv = srvlookup.lookup("matrix", "tcp", server_name)[0] | |
333 | print( | |
334 | f"SRV lookup on _matrix._tcp.{server_name} gave {srv}", | |
335 | file=sys.stderr, | |
336 | ) | |
337 | return srv.host, srv.port, server_name | |
279 | 338 | except Exception: |
280 | return s, 8448 | |
339 | return server_name, 8448, server_name | |
281 | 340 | |
282 | 341 | @staticmethod |
283 | def get_well_known(server_name: str) -> Optional[str]: | |
284 | uri = "https://%s/.well-known/matrix/server" % (server_name,) | |
285 | print("fetching %s" % (uri,), file=sys.stderr) | |
342 | def _get_well_known(server_name: str) -> Optional[str]: | |
343 | if ":" in server_name: | |
344 | # explicit port, or ipv6 literal. Either way, no .well-known | |
345 | return None | |
346 | ||
347 | # TODO: check for ipv4 literals | |
348 | ||
349 | uri = f"https://{server_name}/.well-known/matrix/server" | |
350 | print(f"fetching {uri}", file=sys.stderr) | |
286 | 351 | |
287 | 352 | try: |
288 | 353 | resp = requests.get(uri) |
303 | 368 | print("Invalid response from %s: %s" % (uri, e), file=sys.stderr) |
304 | 369 | return None |
305 | 370 | |
306 | def get_connection( | |
307 | self, url: str, proxies: Optional[Dict[str, str]] = None | |
308 | ) -> HTTPConnectionPool: | |
309 | parsed = urlparse.urlparse(url) | |
310 | ||
311 | (host, port) = self.lookup(parsed.netloc) | |
312 | netloc = "%s:%d" % (host, port) | |
313 | print("Connecting to %s" % (netloc,), file=sys.stderr) | |
314 | url = urlparse.urlunparse( | |
315 | ("https", netloc, parsed.path, parsed.params, parsed.query, parsed.fragment) | |
316 | ) | |
317 | return super().get_connection(url, proxies) | |
318 | ||
319 | 371 | |
320 | 372 | if __name__ == "__main__": |
321 | 373 | main() |
25 | 25 | |
26 | 26 | class FilteredPushRules: |
27 | 27 | def __init__( |
28 | self, push_rules: PushRules, enabled_map: Dict[str, bool], msc3664_enabled: bool | |
28 | self, | |
29 | push_rules: PushRules, | |
30 | enabled_map: Dict[str, bool], | |
31 | msc3664_enabled: bool, | |
32 | msc1767_enabled: bool, | |
29 | 33 | ): ... |
30 | 34 | def rules(self) -> Collection[Tuple[PushRule, bool]]: ... |
31 | 35 | |
40 | 44 | notification_power_levels: Mapping[str, int], |
41 | 45 | related_events_flattened: Mapping[str, Mapping[str, str]], |
42 | 46 | related_event_match_enabled: bool, |
47 | room_version_feature_flags: list[str], | |
48 | msc3931_enabled: bool, | |
43 | 49 | ): ... |
44 | 50 | def run( |
45 | 51 | self, |
712 | 712 | set to the reason code from the HTTP response. |
713 | 713 | |
714 | 714 | Returns: |
715 | SynapseError: | |
715 | The error converted to a SynapseError. | |
716 | 716 | """ |
717 | 717 | # try to parse the body as json, to get better errcode/msg, but |
718 | 718 | # default to M_UNKNOWN with the HTTP status as the error text |
11 | 11 | # See the License for the specific language governing permissions and |
12 | 12 | # limitations under the License. |
13 | 13 | |
14 | from typing import Callable, Dict, Optional | |
14 | from typing import Callable, Dict, List, Optional | |
15 | 15 | |
16 | 16 | import attr |
17 | 17 | |
48 | 48 | class RoomDisposition: |
49 | 49 | STABLE = "stable" |
50 | 50 | UNSTABLE = "unstable" |
51 | ||
52 | ||
53 | class PushRuleRoomFlag: | |
54 | """Enum for listing possible MSC3931 room version feature flags, for push rules""" | |
55 | ||
56 | # MSC3932: Room version supports MSC1767 Extensible Events. | |
57 | EXTENSIBLE_EVENTS = "org.matrix.msc3932.extensible_events" | |
51 | 58 | |
52 | 59 | |
53 | 60 | @attr.s(slots=True, frozen=True, auto_attribs=True) |
90 | 97 | msc3787_knock_restricted_join_rule: bool |
91 | 98 | # MSC3667: Enforce integer power levels |
92 | 99 | msc3667_int_only_power_levels: bool |
100 | # MSC3931: Adds a push rule condition for "room version feature flags", making | |
101 | # some push rules room version dependent. Note that adding a flag to this list | |
102 | # is not enough to mark it "supported": the push rule evaluator also needs to | |
103 | # support the flag. Unknown flags are ignored by the evaluator, making conditions | |
104 | # fail if used. | |
105 | msc3931_push_features: List[str] # values from PushRuleRoomFlag | |
93 | 106 | |
94 | 107 | |
95 | 108 | class RoomVersions: |
110 | 123 | msc2716_redactions=False, |
111 | 124 | msc3787_knock_restricted_join_rule=False, |
112 | 125 | msc3667_int_only_power_levels=False, |
126 | msc3931_push_features=[], | |
113 | 127 | ) |
114 | 128 | V2 = RoomVersion( |
115 | 129 | "2", |
128 | 142 | msc2716_redactions=False, |
129 | 143 | msc3787_knock_restricted_join_rule=False, |
130 | 144 | msc3667_int_only_power_levels=False, |
145 | msc3931_push_features=[], | |
131 | 146 | ) |
132 | 147 | V3 = RoomVersion( |
133 | 148 | "3", |
146 | 161 | msc2716_redactions=False, |
147 | 162 | msc3787_knock_restricted_join_rule=False, |
148 | 163 | msc3667_int_only_power_levels=False, |
164 | msc3931_push_features=[], | |
149 | 165 | ) |
150 | 166 | V4 = RoomVersion( |
151 | 167 | "4", |
164 | 180 | msc2716_redactions=False, |
165 | 181 | msc3787_knock_restricted_join_rule=False, |
166 | 182 | msc3667_int_only_power_levels=False, |
183 | msc3931_push_features=[], | |
167 | 184 | ) |
168 | 185 | V5 = RoomVersion( |
169 | 186 | "5", |
182 | 199 | msc2716_redactions=False, |
183 | 200 | msc3787_knock_restricted_join_rule=False, |
184 | 201 | msc3667_int_only_power_levels=False, |
202 | msc3931_push_features=[], | |
185 | 203 | ) |
186 | 204 | V6 = RoomVersion( |
187 | 205 | "6", |
200 | 218 | msc2716_redactions=False, |
201 | 219 | msc3787_knock_restricted_join_rule=False, |
202 | 220 | msc3667_int_only_power_levels=False, |
221 | msc3931_push_features=[], | |
203 | 222 | ) |
204 | 223 | MSC2176 = RoomVersion( |
205 | 224 | "org.matrix.msc2176", |
218 | 237 | msc2716_redactions=False, |
219 | 238 | msc3787_knock_restricted_join_rule=False, |
220 | 239 | msc3667_int_only_power_levels=False, |
240 | msc3931_push_features=[], | |
221 | 241 | ) |
222 | 242 | V7 = RoomVersion( |
223 | 243 | "7", |
236 | 256 | msc2716_redactions=False, |
237 | 257 | msc3787_knock_restricted_join_rule=False, |
238 | 258 | msc3667_int_only_power_levels=False, |
259 | msc3931_push_features=[], | |
239 | 260 | ) |
240 | 261 | V8 = RoomVersion( |
241 | 262 | "8", |
254 | 275 | msc2716_redactions=False, |
255 | 276 | msc3787_knock_restricted_join_rule=False, |
256 | 277 | msc3667_int_only_power_levels=False, |
278 | msc3931_push_features=[], | |
257 | 279 | ) |
258 | 280 | V9 = RoomVersion( |
259 | 281 | "9", |
272 | 294 | msc2716_redactions=False, |
273 | 295 | msc3787_knock_restricted_join_rule=False, |
274 | 296 | msc3667_int_only_power_levels=False, |
297 | msc3931_push_features=[], | |
275 | 298 | ) |
276 | 299 | MSC3787 = RoomVersion( |
277 | 300 | "org.matrix.msc3787", |
290 | 313 | msc2716_redactions=False, |
291 | 314 | msc3787_knock_restricted_join_rule=True, |
292 | 315 | msc3667_int_only_power_levels=False, |
316 | msc3931_push_features=[], | |
293 | 317 | ) |
294 | 318 | V10 = RoomVersion( |
295 | 319 | "10", |
308 | 332 | msc2716_redactions=False, |
309 | 333 | msc3787_knock_restricted_join_rule=True, |
310 | 334 | msc3667_int_only_power_levels=True, |
335 | msc3931_push_features=[], | |
311 | 336 | ) |
312 | 337 | MSC2716v4 = RoomVersion( |
313 | 338 | "org.matrix.msc2716v4", |
326 | 351 | msc2716_redactions=True, |
327 | 352 | msc3787_knock_restricted_join_rule=False, |
328 | 353 | msc3667_int_only_power_levels=False, |
354 | msc3931_push_features=[], | |
355 | ) | |
356 | MSC1767v10 = RoomVersion( | |
357 | # MSC1767 (Extensible Events) based on room version "10" | |
358 | "org.matrix.msc1767.10", | |
359 | RoomDisposition.UNSTABLE, | |
360 | EventFormatVersions.ROOM_V4_PLUS, | |
361 | StateResolutionVersions.V2, | |
362 | enforce_key_validity=True, | |
363 | special_case_aliases_auth=False, | |
364 | strict_canonicaljson=True, | |
365 | limit_notifications_power_levels=True, | |
366 | msc2176_redaction_rules=False, | |
367 | msc3083_join_rules=True, | |
368 | msc3375_redaction_rules=True, | |
369 | msc2403_knocking=True, | |
370 | msc2716_historical=False, | |
371 | msc2716_redactions=False, | |
372 | msc3787_knock_restricted_join_rule=True, | |
373 | msc3667_int_only_power_levels=True, | |
374 | msc3931_push_features=[PushRuleRoomFlag.EXTENSIBLE_EVENTS], | |
329 | 375 | ) |
330 | 376 | |
331 | 377 |
265 | 265 | reactor.callWhenRunning(lambda: defer.ensureDeferred(wrapper())) |
266 | 266 | |
267 | 267 | |
268 | def listen_metrics( | |
269 | bind_addresses: Iterable[str], port: int, enable_legacy_metric_names: bool | |
270 | ) -> None: | |
268 | def listen_metrics(bind_addresses: Iterable[str], port: int) -> None: | |
271 | 269 | """ |
272 | 270 | Start Prometheus metrics server. |
273 | 271 | """ |
274 | 272 | from prometheus_client import start_http_server as start_http_server_prometheus |
275 | 273 | |
276 | from synapse.metrics import ( | |
277 | RegistryProxy, | |
278 | start_http_server as start_http_server_legacy, | |
279 | ) | |
274 | from synapse.metrics import RegistryProxy | |
280 | 275 | |
281 | 276 | for host in bind_addresses: |
282 | 277 | logger.info("Starting metrics listener on %s:%d", host, port) |
283 | if enable_legacy_metric_names: | |
284 | start_http_server_legacy(port, addr=host, registry=RegistryProxy) | |
285 | else: | |
286 | _set_prometheus_client_use_created_metrics(False) | |
287 | start_http_server_prometheus(port, addr=host, registry=RegistryProxy) | |
278 | _set_prometheus_client_use_created_metrics(False) | |
279 | start_http_server_prometheus(port, addr=host, registry=RegistryProxy) | |
288 | 280 | |
289 | 281 | |
290 | 282 | def _set_prometheus_client_use_created_metrics(new_value: bool) -> None: |
13 | 13 | # limitations under the License. |
14 | 14 | import logging |
15 | 15 | import sys |
16 | from typing import Dict, List, Optional, Tuple | |
17 | ||
18 | from twisted.internet import address | |
16 | from typing import Dict, List | |
17 | ||
19 | 18 | from twisted.web.resource import Resource |
20 | 19 | |
21 | 20 | import synapse |
22 | 21 | import synapse.events |
23 | from synapse.api.errors import HttpResponseException, RequestSendFailed, SynapseError | |
24 | 22 | from synapse.api.urls import ( |
25 | 23 | CLIENT_API_PREFIX, |
26 | 24 | FEDERATION_PREFIX, |
42 | 40 | from synapse.config.server import ListenerConfig |
43 | 41 | from synapse.federation.transport.server import TransportLayerServer |
44 | 42 | from synapse.http.server import JsonResource, OptionsResource |
45 | from synapse.http.servlet import RestServlet, parse_json_object_from_request | |
46 | from synapse.http.site import SynapseRequest | |
47 | 43 | from synapse.logging.context import LoggingContext |
48 | 44 | from synapse.metrics import METRICS_PREFIX, MetricsResource, RegistryProxy |
49 | 45 | from synapse.replication.http import REPLICATION_PREFIX, ReplicationRestResource |
69 | 65 | versions, |
70 | 66 | voip, |
71 | 67 | ) |
72 | from synapse.rest.client._base import client_patterns | |
73 | 68 | from synapse.rest.client.account import ThreepidRestServlet, WhoamiRestServlet |
74 | 69 | from synapse.rest.client.devices import DevicesRestServlet |
75 | 70 | from synapse.rest.client.keys import ( |
76 | 71 | KeyChangesServlet, |
77 | 72 | KeyQueryServlet, |
73 | KeyUploadServlet, | |
78 | 74 | OneTimeKeyServlet, |
79 | 75 | ) |
80 | 76 | from synapse.rest.client.register import ( |
131 | 127 | from synapse.storage.databases.main.ui_auth import UIAuthWorkerStore |
132 | 128 | from synapse.storage.databases.main.user_directory import UserDirectoryStore |
133 | 129 | from synapse.storage.databases.main.user_erasure_store import UserErasureWorkerStore |
134 | from synapse.types import JsonDict | |
135 | 130 | from synapse.util import SYNAPSE_VERSION |
136 | 131 | from synapse.util.httpresourcetree import create_resource_tree |
137 | 132 | |
138 | 133 | logger = logging.getLogger("synapse.app.generic_worker") |
139 | ||
140 | ||
141 | class KeyUploadServlet(RestServlet): | |
142 | """An implementation of the `KeyUploadServlet` that responds to read only | |
143 | requests, but otherwise proxies through to the master instance. | |
144 | """ | |
145 | ||
146 | PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") | |
147 | ||
148 | def __init__(self, hs: HomeServer): | |
149 | """ | |
150 | Args: | |
151 | hs: server | |
152 | """ | |
153 | super().__init__() | |
154 | self.auth = hs.get_auth() | |
155 | self.store = hs.get_datastores().main | |
156 | self.http_client = hs.get_simple_http_client() | |
157 | self.main_uri = hs.config.worker.worker_main_http_uri | |
158 | ||
159 | async def on_POST( | |
160 | self, request: SynapseRequest, device_id: Optional[str] | |
161 | ) -> Tuple[int, JsonDict]: | |
162 | requester = await self.auth.get_user_by_req(request, allow_guest=True) | |
163 | user_id = requester.user.to_string() | |
164 | body = parse_json_object_from_request(request) | |
165 | ||
166 | if device_id is not None: | |
167 | # passing the device_id here is deprecated; however, we allow it | |
168 | # for now for compatibility with older clients. | |
169 | if requester.device_id is not None and device_id != requester.device_id: | |
170 | logger.warning( | |
171 | "Client uploading keys for a different device " | |
172 | "(logged in as %s, uploading for %s)", | |
173 | requester.device_id, | |
174 | device_id, | |
175 | ) | |
176 | else: | |
177 | device_id = requester.device_id | |
178 | ||
179 | if device_id is None: | |
180 | raise SynapseError( | |
181 | 400, "To upload keys, you must pass device_id when authenticating" | |
182 | ) | |
183 | ||
184 | if body: | |
185 | # They're actually trying to upload something, proxy to main synapse. | |
186 | ||
187 | # Proxy headers from the original request, such as the auth headers | |
188 | # (in case the access token is there) and the original IP / | |
189 | # User-Agent of the request. | |
190 | headers: Dict[bytes, List[bytes]] = { | |
191 | header: list(request.requestHeaders.getRawHeaders(header, [])) | |
192 | for header in (b"Authorization", b"User-Agent") | |
193 | } | |
194 | # Add the previous hop to the X-Forwarded-For header. | |
195 | x_forwarded_for = list( | |
196 | request.requestHeaders.getRawHeaders(b"X-Forwarded-For", []) | |
197 | ) | |
198 | # we use request.client here, since we want the previous hop, not the | |
199 | # original client (as returned by request.getClientAddress()). | |
200 | if isinstance(request.client, (address.IPv4Address, address.IPv6Address)): | |
201 | previous_host = request.client.host.encode("ascii") | |
202 | # If the header exists, add to the comma-separated list of the first | |
203 | # instance of the header. Otherwise, generate a new header. | |
204 | if x_forwarded_for: | |
205 | x_forwarded_for = [x_forwarded_for[0] + b", " + previous_host] | |
206 | x_forwarded_for.extend(x_forwarded_for[1:]) | |
207 | else: | |
208 | x_forwarded_for = [previous_host] | |
209 | headers[b"X-Forwarded-For"] = x_forwarded_for | |
210 | ||
211 | # Replicate the original X-Forwarded-Proto header. Note that | |
212 | # XForwardedForRequest overrides isSecure() to give us the original protocol | |
213 | # used by the client, as opposed to the protocol used by our upstream proxy | |
214 | # - which is what we want here. | |
215 | headers[b"X-Forwarded-Proto"] = [ | |
216 | b"https" if request.isSecure() else b"http" | |
217 | ] | |
218 | ||
219 | try: | |
220 | result = await self.http_client.post_json_get_json( | |
221 | self.main_uri + request.uri.decode("ascii"), body, headers=headers | |
222 | ) | |
223 | except HttpResponseException as e: | |
224 | raise e.to_synapse_error() from e | |
225 | except RequestSendFailed as e: | |
226 | raise SynapseError(502, "Failed to talk to master") from e | |
227 | ||
228 | return 200, result | |
229 | else: | |
230 | # Just interested in counts. | |
231 | result = await self.store.count_e2e_one_time_keys(user_id, device_id) | |
232 | return 200, {"one_time_key_counts": result} | |
233 | 134 | |
234 | 135 | |
235 | 136 | class GenericWorkerSlavedStore( |
418 | 319 | _base.listen_metrics( |
419 | 320 | listener.bind_addresses, |
420 | 321 | listener.port, |
421 | enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics, | |
422 | 322 | ) |
423 | 323 | else: |
424 | 324 | logger.warning("Unsupported listener type: %s", listener.type) |
264 | 264 | _base.listen_metrics( |
265 | 265 | listener.bind_addresses, |
266 | 266 | listener.port, |
267 | enable_legacy_metric_names=self.config.metrics.enable_legacy_metrics, | |
268 | 267 | ) |
269 | 268 | else: |
270 | 269 | # this shouldn't happen, as the listener type should have been checked |
31 | 31 | |
32 | 32 | logger = logging.getLogger(__name__) |
33 | 33 | |
34 | # Type for the `device_one_time_key_counts` field in an appservice transaction | |
34 | # Type for the `device_one_time_keys_count` field in an appservice transaction | |
35 | 35 | # user ID -> {device ID -> {algorithm -> count}} |
36 | TransactionOneTimeKeyCounts = Dict[str, Dict[str, Dict[str, int]]] | |
36 | TransactionOneTimeKeysCount = Dict[str, Dict[str, Dict[str, int]]] | |
37 | 37 | |
38 | 38 | # Type for the `device_unused_fallback_key_types` field in an appservice transaction |
39 | 39 | # user ID -> {device ID -> [algorithm]} |
375 | 375 | events: List[EventBase], |
376 | 376 | ephemeral: List[JsonDict], |
377 | 377 | to_device_messages: List[JsonDict], |
378 | one_time_key_counts: TransactionOneTimeKeyCounts, | |
378 | one_time_keys_count: TransactionOneTimeKeysCount, | |
379 | 379 | unused_fallback_keys: TransactionUnusedFallbackKeys, |
380 | 380 | device_list_summary: DeviceListUpdates, |
381 | 381 | ): |
384 | 384 | self.events = events |
385 | 385 | self.ephemeral = ephemeral |
386 | 386 | self.to_device_messages = to_device_messages |
387 | self.one_time_key_counts = one_time_key_counts | |
387 | self.one_time_keys_count = one_time_keys_count | |
388 | 388 | self.unused_fallback_keys = unused_fallback_keys |
389 | 389 | self.device_list_summary = device_list_summary |
390 | 390 | |
401 | 401 | events=self.events, |
402 | 402 | ephemeral=self.ephemeral, |
403 | 403 | to_device_messages=self.to_device_messages, |
404 | one_time_key_counts=self.one_time_key_counts, | |
404 | one_time_keys_count=self.one_time_keys_count, | |
405 | 405 | unused_fallback_keys=self.unused_fallback_keys, |
406 | 406 | device_list_summary=self.device_list_summary, |
407 | 407 | txn_id=self.id, |
22 | 22 | from synapse.api.errors import CodeMessageException |
23 | 23 | from synapse.appservice import ( |
24 | 24 | ApplicationService, |
25 | TransactionOneTimeKeyCounts, | |
25 | TransactionOneTimeKeysCount, | |
26 | 26 | TransactionUnusedFallbackKeys, |
27 | 27 | ) |
28 | 28 | from synapse.events import EventBase |
261 | 261 | events: List[EventBase], |
262 | 262 | ephemeral: List[JsonDict], |
263 | 263 | to_device_messages: List[JsonDict], |
264 | one_time_key_counts: TransactionOneTimeKeyCounts, | |
264 | one_time_keys_count: TransactionOneTimeKeysCount, | |
265 | 265 | unused_fallback_keys: TransactionUnusedFallbackKeys, |
266 | 266 | device_list_summary: DeviceListUpdates, |
267 | 267 | txn_id: Optional[int] = None, |
309 | 309 | |
310 | 310 | # TODO: Update to stable prefixes once MSC3202 completes FCP merge |
311 | 311 | if service.msc3202_transaction_extensions: |
312 | if one_time_key_counts: | |
312 | if one_time_keys_count: | |
313 | 313 | body[ |
314 | 314 | "org.matrix.msc3202.device_one_time_key_counts" |
315 | ] = one_time_key_counts | |
315 | ] = one_time_keys_count | |
316 | body[ | |
317 | "org.matrix.msc3202.device_one_time_keys_count" | |
318 | ] = one_time_keys_count | |
316 | 319 | if unused_fallback_keys: |
317 | 320 | body[ |
318 | 321 | "org.matrix.msc3202.device_unused_fallback_key_types" |
63 | 63 | from synapse.appservice import ( |
64 | 64 | ApplicationService, |
65 | 65 | ApplicationServiceState, |
66 | TransactionOneTimeKeyCounts, | |
66 | TransactionOneTimeKeysCount, | |
67 | 67 | TransactionUnusedFallbackKeys, |
68 | 68 | ) |
69 | 69 | from synapse.appservice.api import ApplicationServiceApi |
257 | 257 | ): |
258 | 258 | return |
259 | 259 | |
260 | one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None | |
260 | one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None | |
261 | 261 | unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None |
262 | 262 | |
263 | 263 | if ( |
268 | 268 | # for the users which are mentioned in this transaction, |
269 | 269 | # as well as the appservice's sender. |
270 | 270 | ( |
271 | one_time_key_counts, | |
271 | one_time_keys_count, | |
272 | 272 | unused_fallback_keys, |
273 | 273 | ) = await self._compute_msc3202_otk_counts_and_fallback_keys( |
274 | 274 | service, events, ephemeral, to_device_messages_to_send |
280 | 280 | events, |
281 | 281 | ephemeral, |
282 | 282 | to_device_messages_to_send, |
283 | one_time_key_counts, | |
283 | one_time_keys_count, | |
284 | 284 | unused_fallback_keys, |
285 | 285 | device_list_summary, |
286 | 286 | ) |
295 | 295 | events: Iterable[EventBase], |
296 | 296 | ephemerals: Iterable[JsonDict], |
297 | 297 | to_device_messages: Iterable[JsonDict], |
298 | ) -> Tuple[TransactionOneTimeKeyCounts, TransactionUnusedFallbackKeys]: | |
298 | ) -> Tuple[TransactionOneTimeKeysCount, TransactionUnusedFallbackKeys]: | |
299 | 299 | """ |
300 | 300 | Given a list of the events, ephemeral messages and to-device messages, |
301 | 301 | - first computes a list of application services users that may have |
366 | 366 | events: List[EventBase], |
367 | 367 | ephemeral: Optional[List[JsonDict]] = None, |
368 | 368 | to_device_messages: Optional[List[JsonDict]] = None, |
369 | one_time_key_counts: Optional[TransactionOneTimeKeyCounts] = None, | |
369 | one_time_keys_count: Optional[TransactionOneTimeKeysCount] = None, | |
370 | 370 | unused_fallback_keys: Optional[TransactionUnusedFallbackKeys] = None, |
371 | 371 | device_list_summary: Optional[DeviceListUpdates] = None, |
372 | 372 | ) -> None: |
379 | 379 | events: The persistent events to include in the transaction. |
380 | 380 | ephemeral: The ephemeral events to include in the transaction. |
381 | 381 | to_device_messages: The to-device messages to include in the transaction. |
382 | one_time_key_counts: Counts of remaining one-time keys for relevant | |
382 | one_time_keys_count: Counts of remaining one-time keys for relevant | |
383 | 383 | appservice devices in the transaction. |
384 | 384 | unused_fallback_keys: Lists of unused fallback keys for relevant |
385 | 385 | appservice devices in the transaction. |
396 | 396 | events=events, |
397 | 397 | ephemeral=ephemeral or [], |
398 | 398 | to_device_messages=to_device_messages or [], |
399 | one_time_key_counts=one_time_key_counts or {}, | |
399 | one_time_keys_count=one_time_keys_count or {}, | |
400 | 400 | unused_fallback_keys=unused_fallback_keys or {}, |
401 | 401 | device_list_summary=device_list_summary or DeviceListUpdates(), |
402 | 402 | ) |
15 | 15 | |
16 | 16 | import attr |
17 | 17 | |
18 | from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions | |
18 | 19 | from synapse.config._base import Config |
19 | 20 | from synapse.types import JsonDict |
20 | 21 | |
51 | 52 | |
52 | 53 | # MSC3266 (room summary api) |
53 | 54 | self.msc3266_enabled: bool = experimental.get("msc3266_enabled", False) |
54 | ||
55 | # MSC3030 (Jump to date API endpoint) | |
56 | self.msc3030_enabled: bool = experimental.get("msc3030_enabled", False) | |
57 | 55 | |
58 | 56 | # MSC2409 (this setting only relates to optionally sending to-device messages). |
59 | 57 | # Presence, typing and read receipt EDUs are already sent to application services that |
130 | 128 | |
131 | 129 | # MSC3912: Relation-based redactions. |
132 | 130 | self.msc3912_enabled: bool = experimental.get("msc3912_enabled", False) |
131 | ||
132 | # MSC1767 and friends: Extensible Events | |
133 | self.msc1767_enabled: bool = experimental.get("msc1767_enabled", False) | |
134 | if self.msc1767_enabled: | |
135 | # Enable room version (and thus applicable push rules from MSC3931/3932) | |
136 | version_id = RoomVersions.MSC1767v10.identifier | |
137 | KNOWN_ROOM_VERSIONS[version_id] = RoomVersions.MSC1767v10 |
316 | 316 | Set up the logging subsystem. |
317 | 317 | |
318 | 318 | Args: |
319 | config (LoggingConfig | synapse.config.worker.WorkerConfig): | |
320 | configuration data | |
321 | ||
322 | use_worker_options (bool): True to use the 'worker_log_config' option | |
319 | config: configuration data | |
320 | ||
321 | use_worker_options: True to use the 'worker_log_config' option | |
323 | 322 | instead of 'log_config'. |
324 | 323 | |
325 | 324 | logBeginner: The Twisted logBeginner to use. |
42 | 42 | def read_config(self, config: JsonDict, **kwargs: Any) -> None: |
43 | 43 | self.enable_metrics = config.get("enable_metrics", False) |
44 | 44 | |
45 | self.enable_legacy_metrics = config.get("enable_legacy_metrics", False) | |
46 | ||
47 | 45 | self.report_stats = config.get("report_stats", None) |
48 | 46 | self.report_stats_endpoint = config.get( |
49 | 47 | "report_stats_endpoint", "https://matrix.org/report-usage-stats/push" |
149 | 149 | |
150 | 150 | self.rc_third_party_invite = RatelimitSettings( |
151 | 151 | config.get("rc_third_party_invite", {}), |
152 | defaults={ | |
153 | "per_second": self.rc_message.per_second, | |
154 | "burst_count": self.rc_message.burst_count, | |
155 | }, | |
152 | defaults={"per_second": 0.0025, "burst_count": 5}, | |
156 | 153 | ) |
28 | 28 | ) |
29 | 29 | from .server import DIRECT_TCP_ERROR, ListenerConfig, parse_listener_def |
30 | 30 | |
31 | _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR = """ | |
32 | The send_federation config option must be disabled in the main | |
33 | synapse process before they can be run in a separate worker. | |
34 | ||
35 | Please add ``send_federation: false`` to the main config | |
36 | """ | |
37 | ||
38 | _PUSHER_WITH_START_PUSHERS_ENABLED_ERROR = """ | |
39 | The start_pushers config option must be disabled in the main | |
40 | synapse process before they can be run in a separate worker. | |
41 | ||
42 | Please add ``start_pushers: false`` to the main config | |
43 | """ | |
44 | ||
45 | 31 | _DEPRECATED_WORKER_DUTY_OPTION_USED = """ |
46 | 32 | The '%s' configuration option is deprecated and will be removed in a future |
47 | 33 | Synapse version. Please use ``%s: name_of_worker`` instead. |
161 | 147 | self.worker_name = config.get("worker_name", self.worker_app) |
162 | 148 | self.instance_name = self.worker_name or "master" |
163 | 149 | |
150 | # FIXME: Remove this check after a suitable amount of time. | |
164 | 151 | self.worker_main_http_uri = config.get("worker_main_http_uri", None) |
152 | if self.worker_main_http_uri is not None: | |
153 | logger.warning( | |
154 | "The config option worker_main_http_uri is unused since Synapse 1.73. " | |
155 | "It can be safely removed from your configuration." | |
156 | ) | |
165 | 157 | |
166 | 158 | # This option is really only here to support `--manhole` command line |
167 | 159 | # argument. |
175 | 167 | ) |
176 | 168 | ) |
177 | 169 | |
178 | # Handle federation sender configuration. | |
179 | # | |
180 | # There are two ways of configuring which instances handle federation | |
181 | # sending: | |
182 | # 1. The old way where "send_federation" is set to false and running a | |
183 | # `synapse.app.federation_sender` worker app. | |
184 | # 2. Specifying the workers sending federation in | |
185 | # `federation_sender_instances`. | |
186 | # | |
187 | ||
188 | send_federation = config.get("send_federation", True) | |
189 | ||
190 | federation_sender_instances = config.get("federation_sender_instances") | |
191 | if federation_sender_instances is None: | |
192 | # Default to an empty list, which means "another, unknown, worker is | |
193 | # responsible for it". | |
194 | federation_sender_instances = [] | |
195 | ||
196 | # If no federation sender instances are set we check if | |
197 | # `send_federation` is set, which means use master | |
198 | if send_federation: | |
199 | federation_sender_instances = ["master"] | |
200 | ||
201 | if self.worker_app == "synapse.app.federation_sender": | |
202 | if send_federation: | |
203 | # If we're running federation senders, and not using | |
204 | # `federation_sender_instances`, then we should have | |
205 | # explicitly set `send_federation` to false. | |
206 | raise ConfigError( | |
207 | _FEDERATION_SENDER_WITH_SEND_FEDERATION_ENABLED_ERROR | |
208 | ) | |
209 | ||
210 | federation_sender_instances = [self.worker_name] | |
211 | ||
170 | federation_sender_instances = self._worker_names_performing_this_duty( | |
171 | config, | |
172 | "send_federation", | |
173 | "synapse.app.federation_sender", | |
174 | "federation_sender_instances", | |
175 | ) | |
212 | 176 | self.send_federation = self.instance_name in federation_sender_instances |
213 | 177 | self.federation_shard_config = ShardedWorkerHandlingConfig( |
214 | 178 | federation_sender_instances |
275 | 239 | ) |
276 | 240 | |
277 | 241 | # Handle sharded push |
278 | start_pushers = config.get("start_pushers", True) | |
279 | pusher_instances = config.get("pusher_instances") | |
280 | if pusher_instances is None: | |
281 | # Default to an empty list, which means "another, unknown, worker is | |
282 | # responsible for it". | |
283 | pusher_instances = [] | |
284 | ||
285 | # If no pushers instances are set we check if `start_pushers` is | |
286 | # set, which means use master | |
287 | if start_pushers: | |
288 | pusher_instances = ["master"] | |
289 | ||
290 | if self.worker_app == "synapse.app.pusher": | |
291 | if start_pushers: | |
292 | # If we're running pushers, and not using | |
293 | # `pusher_instances`, then we should have explicitly set | |
294 | # `start_pushers` to false. | |
295 | raise ConfigError(_PUSHER_WITH_START_PUSHERS_ENABLED_ERROR) | |
296 | ||
297 | pusher_instances = [self.instance_name] | |
298 | ||
242 | pusher_instances = self._worker_names_performing_this_duty( | |
243 | config, | |
244 | "start_pushers", | |
245 | "synapse.app.pusher", | |
246 | "pusher_instances", | |
247 | ) | |
299 | 248 | self.start_pushers = self.instance_name in pusher_instances |
300 | 249 | self.pusher_shard_config = ShardedWorkerHandlingConfig(pusher_instances) |
301 | 250 | |
418 | 367 | # (By this point, these are either the same value or only one is not None.) |
419 | 368 | return bool(new_option_should_run_here or legacy_option_should_run_here) |
420 | 369 | |
370 | def _worker_names_performing_this_duty( | |
371 | self, | |
372 | config: Dict[str, Any], | |
373 | legacy_option_name: str, | |
374 | legacy_app_name: str, | |
375 | modern_instance_list_name: str, | |
376 | ) -> List[str]: | |
377 | """ | |
378 | Retrieves the names of the workers handling a given duty, by either legacy | |
379 | option or instance list. | |
380 | ||
381 | There are two ways of configuring which instances handle a given duty, e.g. | |
382 | for configuring pushers: | |
383 | ||
384 | 1. The old way where "start_pushers" is set to false and running a | |
385 | `synapse.app.pusher'` worker app. | |
386 | 2. Specifying the workers sending federation in `pusher_instances`. | |
387 | ||
388 | Args: | |
389 | config: settings read from yaml. | |
390 | legacy_option_name: the old way of enabling options. e.g. 'start_pushers' | |
391 | legacy_app_name: The historical app name. e.g. 'synapse.app.pusher' | |
392 | modern_instance_list_name: the string name of the new instance_list. e.g. | |
393 | 'pusher_instances' | |
394 | ||
395 | Returns: | |
396 | A list of worker instance names handling the given duty. | |
397 | """ | |
398 | ||
399 | legacy_option = config.get(legacy_option_name, True) | |
400 | ||
401 | worker_instances = config.get(modern_instance_list_name) | |
402 | if worker_instances is None: | |
403 | # Default to an empty list, which means "another, unknown, worker is | |
404 | # responsible for it". | |
405 | worker_instances = [] | |
406 | ||
407 | # If no worker instances are set we check if the legacy option | |
408 | # is set, which means use the main process. | |
409 | if legacy_option: | |
410 | worker_instances = ["master"] | |
411 | ||
412 | if self.worker_app == legacy_app_name: | |
413 | if legacy_option: | |
414 | # If we're using `legacy_app_name`, and not using | |
415 | # `modern_instance_list_name`, then we should have | |
416 | # explicitly set `legacy_option_name` to false. | |
417 | raise ConfigError( | |
418 | f"The '{legacy_option_name}' config option must be disabled in " | |
419 | "the main synapse process before they can be run in a separate " | |
420 | "worker.\n" | |
421 | f"Please add `{legacy_option_name}: false` to the main config.\n", | |
422 | ) | |
423 | ||
424 | worker_instances = [self.worker_name] | |
425 | ||
426 | return worker_instances | |
427 | ||
421 | 428 | def read_arguments(self, args: argparse.Namespace) -> None: |
422 | 429 | # We support a bunch of command line arguments that override options in |
423 | 430 | # the config. A lot of these options have a worker_* prefix when running |
212 | 212 | |
213 | 213 | def verify_json_objects_for_server( |
214 | 214 | self, server_and_json: Iterable[Tuple[str, dict, int]] |
215 | ) -> List[defer.Deferred]: | |
215 | ) -> List["defer.Deferred[None]"]: | |
216 | 216 | """Bulk verifies signatures of json objects, bulk fetching keys as |
217 | 217 | necessary. |
218 | 218 | |
225 | 225 | valid. |
226 | 226 | |
227 | 227 | Returns: |
228 | List<Deferred[None]>: for each input triplet, a deferred indicating success | |
229 | or failure to verify each json object's signature for the given | |
230 | server_name. The deferreds run their callbacks in the sentinel | |
231 | logcontext. | |
228 | For each input triplet, a deferred indicating success or failure to | |
229 | verify each json object's signature for the given server_name. The | |
230 | deferreds run their callbacks in the sentinel logcontext. | |
232 | 231 | """ |
233 | 232 | return [ |
234 | 233 | run_in_background( |
857 | 856 | response = await self.client.get_json( |
858 | 857 | destination=server_name, |
859 | 858 | path="/_matrix/key/v2/server/" |
860 | + urllib.parse.quote(requested_key_id), | |
859 | + urllib.parse.quote(requested_key_id, safe=""), | |
861 | 860 | ignore_backoff=True, |
862 | 861 | # we only give the remote server 10s to respond. It should be an |
863 | 862 | # easy request to handle, so if it doesn't reply within 10s, it's |
596 | 596 | format_version: The event format version |
597 | 597 | |
598 | 598 | Returns: |
599 | type: A type that can be initialized as per the initializer of | |
600 | `FrozenEvent` | |
599 | A type that can be initialized as per the initializer of `FrozenEvent` | |
601 | 600 | """ |
602 | 601 | |
603 | 602 | if format_version == EventFormatVersions.ROOM_V1_V2: |
127 | 127 | state_filter=StateFilter.from_types( |
128 | 128 | auth_types_for_event(self.room_version, self) |
129 | 129 | ), |
130 | await_full_state=False, | |
130 | 131 | ) |
131 | 132 | auth_event_ids = self._event_auth_handler.compute_auth_events( |
132 | 133 | self, state_ids |
1690 | 1690 | # to return events on *both* sides of the timestamp to |
1691 | 1691 | # help reconcile the gap faster. |
1692 | 1692 | _timestamp_to_event_from_destination, |
1693 | # Since this endpoint is new, we should try other servers before giving up. | |
1694 | # We can safely remove this in a year (remove after 2023-11-16). | |
1695 | failover_on_unknown_endpoint=True, | |
1693 | 1696 | ) |
1694 | 1697 | return timestamp_to_event_response |
1695 | except SynapseError: | |
1698 | except SynapseError as e: | |
1699 | logger.warn( | |
1700 | "timestamp_to_event(room_id=%s, timestamp=%s, direction=%s): encountered error when trying to fetch from destinations: %s", | |
1701 | room_id, | |
1702 | timestamp, | |
1703 | direction, | |
1704 | e, | |
1705 | ) | |
1696 | 1706 | return None |
1697 | 1707 | |
1698 | 1708 | async def _timestamp_to_event_from_destination( |
433 | 433 | # If there are no prev event IDs then the state is empty |
434 | 434 | # and so no remote servers in the room |
435 | 435 | destinations = set() |
436 | else: | |
436 | ||
437 | if destinations is None: | |
438 | # During partial join we use the set of servers that we got | |
439 | # when beginning the join. It's still possible that we send | |
440 | # events to servers that left the room in the meantime, but | |
441 | # we consider that an acceptable risk since it is only our own | |
442 | # events that we leak and not other server's ones. | |
443 | partial_state_destinations = ( | |
444 | await self.store.get_partial_state_servers_at_join( | |
445 | event.room_id | |
446 | ) | |
447 | ) | |
448 | ||
449 | if len(partial_state_destinations) > 0: | |
450 | destinations = partial_state_destinations | |
451 | ||
452 | if destinations is None: | |
437 | 453 | # We check the external cache for the destinations, which is |
438 | 454 | # stored per state group. |
439 | 455 |
34 | 34 | from synapse.logging.opentracing import SynapseTags, set_tag |
35 | 35 | from synapse.metrics import sent_transactions_counter |
36 | 36 | from synapse.metrics.background_process_metrics import run_as_background_process |
37 | from synapse.types import ReadReceipt | |
37 | from synapse.types import JsonDict, ReadReceipt | |
38 | 38 | from synapse.util.retryutils import NotRetryingDestination, get_retry_limiter |
39 | 39 | from synapse.visibility import filter_events_for_server |
40 | 40 | |
135 | 135 | # destination |
136 | 136 | self._pending_presence: Dict[str, UserPresenceState] = {} |
137 | 137 | |
138 | # room_id -> receipt_type -> user_id -> receipt_dict | |
139 | self._pending_rrs: Dict[str, Dict[str, Dict[str, dict]]] = {} | |
138 | # List of room_id -> receipt_type -> user_id -> receipt_dict, | |
139 | # | |
140 | # Each receipt can only have a single receipt per | |
141 | # (room ID, receipt type, user ID, thread ID) tuple. | |
142 | self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = [] | |
140 | 143 | self._rrs_pending_flush = False |
141 | 144 | |
142 | 145 | # stream_id of last successfully sent to-device message. |
201 | 204 | Args: |
202 | 205 | receipt: receipt to be queued |
203 | 206 | """ |
204 | self._pending_rrs.setdefault(receipt.room_id, {}).setdefault( | |
205 | receipt.receipt_type, {} | |
206 | )[receipt.user_id] = {"event_ids": receipt.event_ids, "data": receipt.data} | |
207 | serialized_receipt: JsonDict = { | |
208 | "event_ids": receipt.event_ids, | |
209 | "data": receipt.data, | |
210 | } | |
211 | if receipt.thread_id is not None: | |
212 | serialized_receipt["data"]["thread_id"] = receipt.thread_id | |
213 | ||
214 | # Find which EDU to add this receipt to. There's three situations depending | |
215 | # on the (room ID, receipt type, user, thread ID) tuple: | |
216 | # | |
217 | # 1. If it fully matches, clobber the information. | |
218 | # 2. If it is missing, add the information. | |
219 | # 3. If the subset tuple of (room ID, receipt type, user) matches, check | |
220 | # the next EDU (or add a new EDU). | |
221 | for edu in self._pending_receipt_edus: | |
222 | receipt_content = edu.setdefault(receipt.room_id, {}).setdefault( | |
223 | receipt.receipt_type, {} | |
224 | ) | |
225 | # If this room ID, receipt type, user ID is not in this EDU, OR if | |
226 | # the full tuple matches, use the current EDU. | |
227 | if ( | |
228 | receipt.user_id not in receipt_content | |
229 | or receipt_content[receipt.user_id].get("thread_id") | |
230 | == receipt.thread_id | |
231 | ): | |
232 | receipt_content[receipt.user_id] = serialized_receipt | |
233 | break | |
234 | ||
235 | # If no matching EDU was found, create a new one. | |
236 | else: | |
237 | self._pending_receipt_edus.append( | |
238 | { | |
239 | receipt.room_id: { | |
240 | receipt.receipt_type: {receipt.user_id: serialized_receipt} | |
241 | } | |
242 | } | |
243 | ) | |
207 | 244 | |
208 | 245 | def flush_read_receipts_for_room(self, room_id: str) -> None: |
209 | # if we don't have any read-receipts for this room, it may be that we've already | |
210 | # sent them out, so we don't need to flush. | |
211 | if room_id not in self._pending_rrs: | |
212 | return | |
213 | self._rrs_pending_flush = True | |
214 | self.attempt_new_transaction() | |
246 | # If there are any pending receipts for this room then force-flush them | |
247 | # in a new transaction. | |
248 | for edu in self._pending_receipt_edus: | |
249 | if room_id in edu: | |
250 | self._rrs_pending_flush = True | |
251 | self.attempt_new_transaction() | |
252 | # No use in checking remaining EDUs if the room was found. | |
253 | break | |
215 | 254 | |
216 | 255 | def send_keyed_edu(self, edu: Edu, key: Hashable) -> None: |
217 | 256 | self._pending_edus_keyed[(edu.edu_type, key)] = edu |
350 | 389 | self._pending_edus = [] |
351 | 390 | self._pending_edus_keyed = {} |
352 | 391 | self._pending_presence = {} |
353 | self._pending_rrs = {} | |
392 | self._pending_receipt_edus = [] | |
354 | 393 | |
355 | 394 | self._start_catching_up() |
356 | 395 | except FederationDeniedError as e: |
504 | 543 | new_pdus = await filter_events_for_server( |
505 | 544 | self._storage_controllers, |
506 | 545 | self._destination, |
546 | self._server_name, | |
507 | 547 | new_pdus, |
508 | 548 | redact=False, |
509 | 549 | ) |
541 | 581 | self._destination, last_successful_stream_ordering |
542 | 582 | ) |
543 | 583 | |
544 | def _get_rr_edus(self, force_flush: bool) -> Iterable[Edu]: | |
545 | if not self._pending_rrs: | |
584 | def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]: | |
585 | if not self._pending_receipt_edus: | |
546 | 586 | return |
547 | 587 | if not force_flush and not self._rrs_pending_flush: |
548 | 588 | # not yet time for this lot |
549 | 589 | return |
550 | 590 | |
551 | edu = Edu( | |
552 | origin=self._server_name, | |
553 | destination=self._destination, | |
554 | edu_type=EduTypes.RECEIPT, | |
555 | content=self._pending_rrs, | |
556 | ) | |
557 | self._pending_rrs = {} | |
558 | self._rrs_pending_flush = False | |
559 | yield edu | |
591 | # Send at most limit EDUs for receipts. | |
592 | for content in self._pending_receipt_edus[:limit]: | |
593 | yield Edu( | |
594 | origin=self._server_name, | |
595 | destination=self._destination, | |
596 | edu_type=EduTypes.RECEIPT, | |
597 | content=content, | |
598 | ) | |
599 | self._pending_receipt_edus = self._pending_receipt_edus[limit:] | |
600 | ||
601 | # If there are still pending read-receipts, don't reset the pending flush | |
602 | # flag. | |
603 | if not self._pending_receipt_edus: | |
604 | self._rrs_pending_flush = False | |
560 | 605 | |
561 | 606 | def _pop_pending_edus(self, limit: int) -> List[Edu]: |
562 | 607 | pending_edus = self._pending_edus |
643 | 688 | async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]: |
644 | 689 | # First we calculate the EDUs we want to send, if any. |
645 | 690 | |
646 | # We start by fetching device related EDUs, i.e device updates and to | |
647 | # device messages. We have to keep 2 free slots for presence and rr_edus. | |
648 | device_edu_limit = MAX_EDUS_PER_TRANSACTION - 2 | |
649 | ||
650 | # We prioritize to-device messages so that existing encryption channels | |
651 | # work. We also keep a few slots spare (by reducing the limit) so that | |
652 | # we can still trickle out some device list updates. | |
653 | ( | |
654 | to_device_edus, | |
655 | device_stream_id, | |
656 | ) = await self.queue._get_to_device_message_edus(device_edu_limit - 10) | |
657 | ||
658 | if to_device_edus: | |
659 | self._device_stream_id = device_stream_id | |
660 | else: | |
661 | self.queue._last_device_stream_id = device_stream_id | |
662 | ||
663 | device_edu_limit -= len(to_device_edus) | |
664 | ||
665 | device_update_edus, dev_list_id = await self.queue._get_device_update_edus( | |
666 | device_edu_limit | |
667 | ) | |
668 | ||
669 | if device_update_edus: | |
670 | self._device_list_id = dev_list_id | |
671 | else: | |
672 | self.queue._last_device_list_stream_id = dev_list_id | |
673 | ||
674 | pending_edus = device_update_edus + to_device_edus | |
675 | ||
676 | # Now add the read receipt EDU. | |
677 | pending_edus.extend(self.queue._get_rr_edus(force_flush=False)) | |
678 | ||
679 | # And presence EDU. | |
691 | # There's a maximum number of EDUs that can be sent with a transaction, | |
692 | # generally device updates and to-device messages get priority, but we | |
693 | # want to ensure that there's room for some other EDUs as well. | |
694 | # | |
695 | # This is done by: | |
696 | # | |
697 | # * Add a presence EDU, if one exists. | |
698 | # * Add up-to a small limit of read receipt EDUs. | |
699 | # * Add to-device EDUs, but leave some space for device list updates. | |
700 | # * Add device list updates EDUs. | |
701 | # * If there's any remaining room, add other EDUs. | |
702 | pending_edus = [] | |
703 | ||
704 | # Add presence EDU. | |
680 | 705 | if self.queue._pending_presence: |
681 | 706 | pending_edus.append( |
682 | 707 | Edu( |
695 | 720 | ) |
696 | 721 | self.queue._pending_presence = {} |
697 | 722 | |
723 | # Add read receipt EDUs. | |
724 | pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5)) | |
725 | edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus) | |
726 | ||
727 | # Next, prioritize to-device messages so that existing encryption channels | |
728 | # work. We also keep a few slots spare (by reducing the limit) so that | |
729 | # we can still trickle out some device list updates. | |
730 | ( | |
731 | to_device_edus, | |
732 | device_stream_id, | |
733 | ) = await self.queue._get_to_device_message_edus(edu_limit - 10) | |
734 | ||
735 | if to_device_edus: | |
736 | self._device_stream_id = device_stream_id | |
737 | else: | |
738 | self.queue._last_device_stream_id = device_stream_id | |
739 | ||
740 | pending_edus.extend(to_device_edus) | |
741 | edu_limit -= len(to_device_edus) | |
742 | ||
743 | # Add device list update EDUs. | |
744 | device_update_edus, dev_list_id = await self.queue._get_device_update_edus( | |
745 | edu_limit | |
746 | ) | |
747 | ||
748 | if device_update_edus: | |
749 | self._device_list_id = dev_list_id | |
750 | else: | |
751 | self.queue._last_device_list_stream_id = dev_list_id | |
752 | ||
753 | pending_edus.extend(device_update_edus) | |
754 | edu_limit -= len(device_update_edus) | |
755 | ||
698 | 756 | # Finally add any other types of EDUs if there is room. |
699 | pending_edus.extend( | |
700 | self.queue._pop_pending_edus(MAX_EDUS_PER_TRANSACTION - len(pending_edus)) | |
701 | ) | |
702 | while ( | |
703 | len(pending_edus) < MAX_EDUS_PER_TRANSACTION | |
704 | and self.queue._pending_edus_keyed | |
705 | ): | |
757 | other_edus = self.queue._pop_pending_edus(edu_limit) | |
758 | pending_edus.extend(other_edus) | |
759 | edu_limit -= len(other_edus) | |
760 | while edu_limit > 0 and self.queue._pending_edus_keyed: | |
706 | 761 | _, val = self.queue._pending_edus_keyed.popitem() |
707 | 762 | pending_edus.append(val) |
763 | edu_limit -= 1 | |
708 | 764 | |
709 | 765 | # Now we look for any PDUs to send, by getting up to 50 PDUs from the |
710 | 766 | # queue |
715 | 771 | |
716 | 772 | # if we've decided to send a transaction anyway, and we have room, we |
717 | 773 | # may as well send any pending RRs |
718 | if len(pending_edus) < MAX_EDUS_PER_TRANSACTION: | |
719 | pending_edus.extend(self.queue._get_rr_edus(force_flush=True)) | |
774 | if edu_limit: | |
775 | pending_edus.extend( | |
776 | self.queue._get_receipt_edus(force_flush=True, limit=edu_limit) | |
777 | ) | |
720 | 778 | |
721 | 779 | if self._pdus: |
722 | 780 | self._last_stream_ordering = self._pdus[ |
184 | 184 | Raises: |
185 | 185 | Various exceptions when the request fails |
186 | 186 | """ |
187 | path = _create_path( | |
188 | FEDERATION_UNSTABLE_PREFIX, | |
189 | "/org.matrix.msc3030/timestamp_to_event/%s", | |
187 | path = _create_v1_path( | |
188 | "/timestamp_to_event/%s", | |
190 | 189 | room_id, |
191 | 190 | ) |
192 | 191 | |
279 | 278 | Note that this does not append any events to any graphs. |
280 | 279 | |
281 | 280 | Args: |
282 | destination (str): address of remote homeserver | |
283 | room_id (str): room to join/leave | |
284 | user_id (str): user to be joined/left | |
285 | membership (str): one of join/leave | |
286 | params (dict[str, str|Iterable[str]]): Query parameters to include in the | |
287 | request. | |
281 | destination: address of remote homeserver | |
282 | room_id: room to join/leave | |
283 | user_id: user to be joined/left | |
284 | membership: one of join/leave | |
285 | params: Query parameters to include in the request. | |
288 | 286 | |
289 | 287 | Returns: |
290 | 288 | Succeeds when we get a 2xx HTTP response. The result |
24 | 24 | from synapse.federation.transport.server.federation import ( |
25 | 25 | FEDERATION_SERVLET_CLASSES, |
26 | 26 | FederationAccountStatusServlet, |
27 | FederationTimestampLookupServlet, | |
28 | 27 | ) |
29 | 28 | from synapse.http.server import HttpServer, JsonResource |
30 | 29 | from synapse.http.servlet import ( |
290 | 289 | ) |
291 | 290 | |
292 | 291 | for servletclass in SERVLET_GROUPS[servlet_group]: |
293 | # Only allow the `/timestamp_to_event` servlet if msc3030 is enabled | |
294 | if ( | |
295 | servletclass == FederationTimestampLookupServlet | |
296 | and not hs.config.experimental.msc3030_enabled | |
297 | ): | |
298 | continue | |
299 | ||
300 | 292 | # Only allow the `/account_status` servlet if msc3720 is enabled |
301 | 293 | if ( |
302 | 294 | servletclass == FederationAccountStatusServlet |
223 | 223 | |
224 | 224 | With arguments: |
225 | 225 | |
226 | origin (unicode|None): The authenticated server_name of the calling server, | |
226 | origin (str|None): The authenticated server_name of the calling server, | |
227 | 227 | unless REQUIRE_AUTH is set to False and authentication failed. |
228 | 228 | |
229 | content (unicode|None): decoded json body of the request. None if the | |
229 | content (str|None): decoded json body of the request. None if the | |
230 | 230 | request was a GET. |
231 | 231 | |
232 | 232 | query (dict[bytes, list[bytes]]): Query params from the request. url-decoded |
217 | 217 | `dir` can be `f` or `b` to indicate forwards and backwards in time from the |
218 | 218 | given timestamp. |
219 | 219 | |
220 | GET /_matrix/federation/unstable/org.matrix.msc3030/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction> | |
220 | GET /_matrix/federation/v1/timestamp_to_event/<roomID>?ts=<timestamp>&dir=<direction> | |
221 | 221 | { |
222 | 222 | "event_id": ... |
223 | 223 | } |
224 | 224 | """ |
225 | 225 | |
226 | 226 | PATH = "/timestamp_to_event/(?P<room_id>[^/]*)/?" |
227 | PREFIX = FEDERATION_UNSTABLE_PREFIX + "/org.matrix.msc3030" | |
228 | 227 | |
229 | 228 | async def on_GET( |
230 | 229 | self, |
15 | 15 | from typing import TYPE_CHECKING, Optional |
16 | 16 | |
17 | 17 | from synapse.api.errors import SynapseError |
18 | from synapse.handlers.device import DeviceHandler | |
18 | 19 | from synapse.metrics.background_process_metrics import run_as_background_process |
19 | 20 | from synapse.types import Codes, Requester, UserID, create_requester |
20 | 21 | |
74 | 75 | Returns: |
75 | 76 | True if identity server supports removing threepids, otherwise False. |
76 | 77 | """ |
78 | ||
79 | # This can only be called on the main process. | |
80 | assert isinstance(self._device_handler, DeviceHandler) | |
77 | 81 | |
78 | 82 | # Check if this user can be deactivated |
79 | 83 | if not await self._third_party_rules.check_can_deactivate_user( |
64 | 64 | |
65 | 65 | |
66 | 66 | class DeviceWorkerHandler: |
67 | device_list_updater: "DeviceListWorkerUpdater" | |
68 | ||
67 | 69 | def __init__(self, hs: "HomeServer"): |
68 | 70 | self.clock = hs.get_clock() |
69 | 71 | self.hs = hs |
75 | 77 | self.server_name = hs.hostname |
76 | 78 | self._msc3852_enabled = hs.config.experimental.msc3852_enabled |
77 | 79 | |
80 | self.device_list_updater = DeviceListWorkerUpdater(hs) | |
81 | ||
78 | 82 | @trace |
79 | 83 | async def get_devices_by_user(self, user_id: str) -> List[JsonDict]: |
80 | 84 | """ |
97 | 101 | |
98 | 102 | log_kv(device_map) |
99 | 103 | return devices |
104 | ||
105 | async def get_dehydrated_device( | |
106 | self, user_id: str | |
107 | ) -> Optional[Tuple[str, JsonDict]]: | |
108 | """Retrieve the information for a dehydrated device. | |
109 | ||
110 | Args: | |
111 | user_id: the user whose dehydrated device we are looking for | |
112 | Returns: | |
113 | a tuple whose first item is the device ID, and the second item is | |
114 | the dehydrated device information | |
115 | """ | |
116 | return await self.store.get_dehydrated_device(user_id) | |
100 | 117 | |
101 | 118 | @trace |
102 | 119 | async def get_device(self, user_id: str, device_id: str) -> JsonDict: |
126 | 143 | @cancellable |
127 | 144 | async def get_device_changes_in_shared_rooms( |
128 | 145 | self, user_id: str, room_ids: Collection[str], from_token: StreamToken |
129 | ) -> Collection[str]: | |
146 | ) -> Set[str]: | |
130 | 147 | """Get the set of users whose devices have changed who share a room with |
131 | 148 | the given user. |
132 | 149 | """ |
319 | 336 | |
320 | 337 | |
321 | 338 | class DeviceHandler(DeviceWorkerHandler): |
339 | device_list_updater: "DeviceListUpdater" | |
340 | ||
322 | 341 | def __init__(self, hs: "HomeServer"): |
323 | 342 | super().__init__(hs) |
324 | 343 | |
605 | 624 | await self.delete_devices(user_id, [old_device_id]) |
606 | 625 | return device_id |
607 | 626 | |
608 | async def get_dehydrated_device( | |
609 | self, user_id: str | |
610 | ) -> Optional[Tuple[str, JsonDict]]: | |
611 | """Retrieve the information for a dehydrated device. | |
612 | ||
613 | Args: | |
614 | user_id: the user whose dehydrated device we are looking for | |
615 | Returns: | |
616 | a tuple whose first item is the device ID, and the second item is | |
617 | the dehydrated device information | |
618 | """ | |
619 | return await self.store.get_dehydrated_device(user_id) | |
620 | ||
621 | 627 | async def rehydrate_device( |
622 | 628 | self, user_id: str, access_token: str, device_id: str |
623 | 629 | ) -> dict: |
681 | 687 | hosts_already_sent_to: Set[str] = set() |
682 | 688 | |
683 | 689 | try: |
690 | stream_id, room_id = await self.store.get_device_change_last_converted_pos() | |
691 | ||
684 | 692 | while True: |
685 | 693 | self._handle_new_device_update_new_data = False |
686 | rows = await self.store.get_uncoverted_outbound_room_pokes() | |
694 | max_stream_id = self.store.get_device_stream_token() | |
695 | rows = await self.store.get_uncoverted_outbound_room_pokes( | |
696 | stream_id, room_id | |
697 | ) | |
687 | 698 | if not rows: |
688 | 699 | # If the DB returned nothing then there is nothing left to |
689 | 700 | # do, *unless* a new device list update happened during the |
690 | 701 | # DB query. |
702 | ||
703 | # Advance `(stream_id, room_id)`. | |
704 | # `max_stream_id` comes from *before* the query for unconverted | |
705 | # rows, which means that any unconverted rows must have a larger | |
706 | # stream ID. | |
707 | if max_stream_id > stream_id: | |
708 | stream_id, room_id = max_stream_id, "" | |
709 | await self.store.set_device_change_last_converted_pos( | |
710 | stream_id, room_id | |
711 | ) | |
712 | else: | |
713 | assert max_stream_id == stream_id | |
714 | # Avoid moving `room_id` backwards. | |
715 | pass | |
716 | ||
691 | 717 | if self._handle_new_device_update_new_data: |
692 | 718 | continue |
693 | 719 | else: |
717 | 743 | user_id=user_id, |
718 | 744 | device_id=device_id, |
719 | 745 | room_id=room_id, |
720 | stream_id=stream_id, | |
721 | 746 | hosts=hosts, |
722 | 747 | context=opentracing_context, |
723 | 748 | ) |
751 | 776 | hosts_already_sent_to.update(hosts) |
752 | 777 | current_stream_id = stream_id |
753 | 778 | |
779 | # Advance `(stream_id, room_id)`. | |
780 | _, _, room_id, stream_id, _ = rows[-1] | |
781 | await self.store.set_device_change_last_converted_pos( | |
782 | stream_id, room_id | |
783 | ) | |
784 | ||
754 | 785 | finally: |
755 | 786 | self._handle_new_device_update_is_processing = False |
756 | 787 | |
833 | 864 | user_id=user_id, |
834 | 865 | device_id=device_id, |
835 | 866 | room_id=room_id, |
836 | stream_id=None, | |
837 | 867 | hosts=potentially_changed_hosts, |
838 | 868 | context=None, |
839 | 869 | ) |
857 | 887 | ) |
858 | 888 | |
859 | 889 | |
860 | class DeviceListUpdater: | |
890 | class DeviceListWorkerUpdater: | |
891 | "Handles incoming device list updates from federation and contacts the main process over replication" | |
892 | ||
893 | def __init__(self, hs: "HomeServer"): | |
894 | from synapse.replication.http.devices import ( | |
895 | ReplicationUserDevicesResyncRestServlet, | |
896 | ) | |
897 | ||
898 | self._user_device_resync_client = ( | |
899 | ReplicationUserDevicesResyncRestServlet.make_client(hs) | |
900 | ) | |
901 | ||
902 | async def user_device_resync( | |
903 | self, user_id: str, mark_failed_as_stale: bool = True | |
904 | ) -> Optional[JsonDict]: | |
905 | """Fetches all devices for a user and updates the device cache with them. | |
906 | ||
907 | Args: | |
908 | user_id: The user's id whose device_list will be updated. | |
909 | mark_failed_as_stale: Whether to mark the user's device list as stale | |
910 | if the attempt to resync failed. | |
911 | Returns: | |
912 | A dict with device info as under the "devices" in the result of this | |
913 | request: | |
914 | https://matrix.org/docs/spec/server_server/r0.1.2#get-matrix-federation-v1-user-devices-userid | |
915 | """ | |
916 | return await self._user_device_resync_client(user_id=user_id) | |
917 | ||
918 | ||
919 | class DeviceListUpdater(DeviceListWorkerUpdater): | |
861 | 920 | "Handles incoming device list updates from federation and updates the DB" |
862 | 921 | |
863 | 922 | def __init__(self, hs: "HomeServer", device_handler: DeviceHandler): |
26 | 26 | |
27 | 27 | from synapse.api.constants import EduTypes |
28 | 28 | from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError |
29 | from synapse.handlers.device import DeviceHandler | |
29 | 30 | from synapse.logging.context import make_deferred_yieldable, run_in_background |
30 | 31 | from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace |
31 | from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet | |
32 | 32 | from synapse.types import ( |
33 | 33 | JsonDict, |
34 | 34 | UserID, |
55 | 55 | self.is_mine = hs.is_mine |
56 | 56 | self.clock = hs.get_clock() |
57 | 57 | |
58 | self._edu_updater = SigningKeyEduUpdater(hs, self) | |
59 | ||
60 | 58 | federation_registry = hs.get_federation_registry() |
61 | 59 | |
62 | self._is_master = hs.config.worker.worker_app is None | |
63 | if not self._is_master: | |
64 | self._user_device_resync_client = ( | |
65 | ReplicationUserDevicesResyncRestServlet.make_client(hs) | |
66 | ) | |
67 | else: | |
60 | is_master = hs.config.worker.worker_app is None | |
61 | if is_master: | |
62 | edu_updater = SigningKeyEduUpdater(hs) | |
63 | ||
68 | 64 | # Only register this edu handler on master as it requires writing |
69 | 65 | # device updates to the db |
70 | 66 | federation_registry.register_edu_handler( |
71 | 67 | EduTypes.SIGNING_KEY_UPDATE, |
72 | self._edu_updater.incoming_signing_key_update, | |
68 | edu_updater.incoming_signing_key_update, | |
73 | 69 | ) |
74 | 70 | # also handle the unstable version |
75 | 71 | # FIXME: remove this when enough servers have upgraded |
76 | 72 | federation_registry.register_edu_handler( |
77 | 73 | EduTypes.UNSTABLE_SIGNING_KEY_UPDATE, |
78 | self._edu_updater.incoming_signing_key_update, | |
74 | edu_updater.incoming_signing_key_update, | |
79 | 75 | ) |
80 | 76 | |
81 | 77 | # doesn't really work as part of the generic query API, because the |
318 | 314 | # probably be tracking their device lists. However, we haven't |
319 | 315 | # done an initial sync on the device list so we do it now. |
320 | 316 | try: |
321 | if self._is_master: | |
322 | resync_results = await self.device_handler.device_list_updater.user_device_resync( | |
317 | resync_results = ( | |
318 | await self.device_handler.device_list_updater.user_device_resync( | |
323 | 319 | user_id |
324 | 320 | ) |
325 | else: | |
326 | resync_results = await self._user_device_resync_client( | |
327 | user_id=user_id | |
328 | ) | |
321 | ) | |
322 | if resync_results is None: | |
323 | raise ValueError("Device resync failed") | |
329 | 324 | |
330 | 325 | # Add the device keys to the results. |
331 | 326 | user_devices = resync_results["devices"] |
604 | 599 | async def upload_keys_for_user( |
605 | 600 | self, user_id: str, device_id: str, keys: JsonDict |
606 | 601 | ) -> JsonDict: |
602 | # This can only be called from the main process. | |
603 | assert isinstance(self.device_handler, DeviceHandler) | |
607 | 604 | |
608 | 605 | time_now = self.clock.time_msec() |
609 | 606 | |
731 | 728 | user_id: the user uploading the keys |
732 | 729 | keys: the signing keys |
733 | 730 | """ |
731 | # This can only be called from the main process. | |
732 | assert isinstance(self.device_handler, DeviceHandler) | |
734 | 733 | |
735 | 734 | # if a master key is uploaded, then check it. Otherwise, load the |
736 | 735 | # stored master key, to check signatures on other keys |
822 | 821 | Raises: |
823 | 822 | SynapseError: if the signatures dict is not valid. |
824 | 823 | """ |
824 | # This can only be called from the main process. | |
825 | assert isinstance(self.device_handler, DeviceHandler) | |
826 | ||
825 | 827 | failures = {} |
826 | 828 | |
827 | 829 | # signatures to be stored. Each item will be a SignatureListItem |
869 | 871 | - signatures of the user's master key by the user's devices. |
870 | 872 | |
871 | 873 | Args: |
872 | user_id (string): the user uploading the keys | |
874 | user_id: the user uploading the keys | |
873 | 875 | signatures (dict[string, dict]): map of devices to signed keys |
874 | 876 | |
875 | 877 | Returns: |
1199 | 1201 | A tuple of the retrieved key content, the key's ID and the matching VerifyKey. |
1200 | 1202 | If the key cannot be retrieved, all values in the tuple will instead be None. |
1201 | 1203 | """ |
1204 | # This can only be called from the main process. | |
1205 | assert isinstance(self.device_handler, DeviceHandler) | |
1206 | ||
1202 | 1207 | try: |
1203 | 1208 | remote_result = await self.federation.query_user_devices( |
1204 | 1209 | user.domain, user.to_string() |
1395 | 1400 | class SigningKeyEduUpdater: |
1396 | 1401 | """Handles incoming signing key updates from federation and updates the DB""" |
1397 | 1402 | |
1398 | def __init__(self, hs: "HomeServer", e2e_keys_handler: E2eKeysHandler): | |
1403 | def __init__(self, hs: "HomeServer"): | |
1399 | 1404 | self.store = hs.get_datastores().main |
1400 | 1405 | self.federation = hs.get_federation_client() |
1401 | 1406 | self.clock = hs.get_clock() |
1402 | self.e2e_keys_handler = e2e_keys_handler | |
1407 | ||
1408 | device_handler = hs.get_device_handler() | |
1409 | assert isinstance(device_handler, DeviceHandler) | |
1410 | self._device_handler = device_handler | |
1403 | 1411 | |
1404 | 1412 | self._remote_edu_linearizer = Linearizer(name="remote_signing_key") |
1405 | 1413 | |
1444 | 1452 | user_id: the user whose updates we are processing |
1445 | 1453 | """ |
1446 | 1454 | |
1447 | device_handler = self.e2e_keys_handler.device_handler | |
1448 | device_list_updater = device_handler.device_list_updater | |
1449 | ||
1450 | 1455 | async with self._remote_edu_linearizer.queue(user_id): |
1451 | 1456 | pending_updates = self._pending_updates.pop(user_id, []) |
1452 | 1457 | if not pending_updates: |
1458 | 1463 | logger.info("pending updates: %r", pending_updates) |
1459 | 1464 | |
1460 | 1465 | for master_key, self_signing_key in pending_updates: |
1461 | new_device_ids = ( | |
1462 | await device_list_updater.process_cross_signing_key_update( | |
1463 | user_id, | |
1464 | master_key, | |
1465 | self_signing_key, | |
1466 | ) | |
1466 | new_device_ids = await self._device_handler.device_list_updater.process_cross_signing_key_update( | |
1467 | user_id, | |
1468 | master_key, | |
1469 | self_signing_key, | |
1467 | 1470 | ) |
1468 | 1471 | device_ids = device_ids + new_device_ids |
1469 | 1472 | |
1470 | await device_handler.notify_device_update(user_id, device_ids) | |
1473 | await self._device_handler.notify_device_update(user_id, device_ids) |
376 | 376 | """Deletes a given version of the user's e2e_room_keys backup |
377 | 377 | |
378 | 378 | Args: |
379 | user_id(str): the user whose current backup version we're deleting | |
380 | version(str): the version id of the backup being deleted | |
379 | user_id: the user whose current backup version we're deleting | |
380 | version: Optional. the version ID of the backup version we're deleting | |
381 | If missing, we delete the current backup version info. | |
381 | 382 | Raises: |
382 | 383 | NotFoundError: if this backup version doesn't exist |
383 | 384 | """ |
44 | 44 | def __init__(self, hs: "HomeServer"): |
45 | 45 | self._clock = hs.get_clock() |
46 | 46 | self._store = hs.get_datastores().main |
47 | self._state_storage_controller = hs.get_storage_controllers().state | |
47 | 48 | self._server_name = hs.hostname |
48 | 49 | |
49 | 50 | async def check_auth_rules_from_context( |
178 | 179 | this function may return an incorrect result as we are not able to fully |
179 | 180 | track server membership in a room without full state. |
180 | 181 | """ |
181 | if not allow_partial_state_rooms and await self._store.is_partial_state_room( | |
182 | room_id | |
183 | ): | |
184 | raise AuthError( | |
185 | 403, | |
186 | "Unable to authorise you right now; room is partial-stated here.", | |
187 | errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE, | |
188 | ) | |
189 | ||
190 | if not await self.is_host_in_room(room_id, host): | |
191 | raise AuthError(403, "Host not in room.") | |
182 | if await self._store.is_partial_state_room(room_id): | |
183 | if allow_partial_state_rooms: | |
184 | current_hosts = await self._state_storage_controller.get_current_hosts_in_room_or_partial_state_approximation( | |
185 | room_id | |
186 | ) | |
187 | if host not in current_hosts: | |
188 | raise AuthError(403, "Host not in room (partial-state approx).") | |
189 | else: | |
190 | raise AuthError( | |
191 | 403, | |
192 | "Unable to authorise you right now; room is partial-stated here.", | |
193 | errcode=Codes.UNABLE_DUE_TO_PARTIAL_STATE, | |
194 | ) | |
195 | else: | |
196 | if not await self.is_host_in_room(room_id, host): | |
197 | raise AuthError(403, "Host not in room.") | |
192 | 198 | |
193 | 199 | async def check_restricted_join_rules( |
194 | 200 | self, |
377 | 377 | # positives from users having been erased. |
378 | 378 | filtered_extremities = await filter_events_for_server( |
379 | 379 | self._storage_controllers, |
380 | self.server_name, | |
380 | 381 | self.server_name, |
381 | 382 | events_to_check, |
382 | 383 | redact=False, |
1230 | 1231 | async def on_backfill_request( |
1231 | 1232 | self, origin: str, room_id: str, pdu_list: List[str], limit: int |
1232 | 1233 | ) -> List[EventBase]: |
1233 | await self._event_auth_handler.assert_host_in_room(room_id, origin) | |
1234 | # We allow partially joined rooms since in this case we are filtering out | |
1235 | # non-local events in `filter_events_for_server`. | |
1236 | await self._event_auth_handler.assert_host_in_room(room_id, origin, True) | |
1234 | 1237 | |
1235 | 1238 | # Synapse asks for 100 events per backfill request. Do not allow more. |
1236 | 1239 | limit = min(limit, 100) |
1251 | 1254 | ) |
1252 | 1255 | |
1253 | 1256 | events = await filter_events_for_server( |
1254 | self._storage_controllers, origin, events | |
1257 | self._storage_controllers, origin, self.server_name, events | |
1255 | 1258 | ) |
1256 | 1259 | |
1257 | 1260 | return events |
1282 | 1285 | await self._event_auth_handler.assert_host_in_room(event.room_id, origin) |
1283 | 1286 | |
1284 | 1287 | events = await filter_events_for_server( |
1285 | self._storage_controllers, origin, [event] | |
1288 | self._storage_controllers, origin, self.server_name, [event] | |
1286 | 1289 | ) |
1287 | 1290 | event = events[0] |
1288 | 1291 | return event |
1295 | 1298 | latest_events: List[str], |
1296 | 1299 | limit: int, |
1297 | 1300 | ) -> List[EventBase]: |
1298 | await self._event_auth_handler.assert_host_in_room(room_id, origin) | |
1301 | # We allow partially joined rooms since in this case we are filtering out | |
1302 | # non-local events in `filter_events_for_server`. | |
1303 | await self._event_auth_handler.assert_host_in_room(room_id, origin, True) | |
1299 | 1304 | |
1300 | 1305 | # Only allow up to 20 events to be retrieved per request. |
1301 | 1306 | limit = min(limit, 20) |
1308 | 1313 | ) |
1309 | 1314 | |
1310 | 1315 | missing_events = await filter_events_for_server( |
1311 | self._storage_controllers, origin, missing_events | |
1316 | self._storage_controllers, origin, self.server_name, missing_events | |
1312 | 1317 | ) |
1313 | 1318 | |
1314 | 1319 | return missing_events |
1595 | 1600 | Fetch the complexity of a remote room over federation. |
1596 | 1601 | |
1597 | 1602 | Args: |
1598 | remote_room_hosts (list[str]): The remote servers to ask. | |
1599 | room_id (str): The room ID to ask about. | |
1603 | remote_room_hosts: The remote servers to ask. | |
1604 | room_id: The room ID to ask about. | |
1600 | 1605 | |
1601 | 1606 | Returns: |
1602 | 1607 | Dict contains the complexity |
710 | 710 | inviter_display_name: The current display name of the |
711 | 711 | inviter. |
712 | 712 | inviter_avatar_url: The URL of the inviter's avatar. |
713 | id_access_token (str): The access token to authenticate to the identity | |
713 | id_access_token: The access token to authenticate to the identity | |
714 | 714 | server with |
715 | 715 | |
716 | 716 | Returns: |
1134 | 1134 | ) |
1135 | 1135 | state_events = await self.store.get_events_as_list(state_event_ids) |
1136 | 1136 | # Create a StateMap[str] |
1137 | state_map = {(e.type, e.state_key): e.event_id for e in state_events} | |
1137 | current_state_ids = { | |
1138 | (e.type, e.state_key): e.event_id for e in state_events | |
1139 | } | |
1138 | 1140 | # Actually strip down and only use the necessary auth events |
1139 | 1141 | auth_event_ids = self._event_auth_handler.compute_auth_events( |
1140 | 1142 | event=temp_event, |
1141 | current_state_ids=state_map, | |
1143 | current_state_ids=current_state_ids, | |
1142 | 1144 | for_verification=False, |
1143 | 1145 | ) |
1144 | 1146 |
786 | 786 | Must include an ``access_token`` field. |
787 | 787 | |
788 | 788 | Returns: |
789 | UserInfo: an object representing the user. | |
789 | an object representing the user. | |
790 | 790 | """ |
791 | 791 | logger.debug("Using the OAuth2 access_token to request userinfo") |
792 | 792 | metadata = await self.load_metadata() |
1434 | 1434 | localpart: Optional[str] |
1435 | 1435 | confirm_localpart: bool |
1436 | 1436 | display_name: Optional[str] |
1437 | picture: Optional[str] # may be omitted by older `OidcMappingProviders` | |
1437 | 1438 | emails: List[str] |
1438 | 1439 | |
1439 | 1440 | |
1519 | 1520 | @attr.s(slots=True, frozen=True, auto_attribs=True) |
1520 | 1521 | class JinjaOidcMappingConfig: |
1521 | 1522 | subject_claim: str |
1523 | picture_claim: str | |
1522 | 1524 | localpart_template: Optional[Template] |
1523 | 1525 | display_name_template: Optional[Template] |
1524 | 1526 | email_template: Optional[Template] |
1538 | 1540 | @staticmethod |
1539 | 1541 | def parse_config(config: dict) -> JinjaOidcMappingConfig: |
1540 | 1542 | subject_claim = config.get("subject_claim", "sub") |
1543 | picture_claim = config.get("picture_claim", "picture") | |
1541 | 1544 | |
1542 | 1545 | def parse_template_config(option_name: str) -> Optional[Template]: |
1543 | 1546 | if option_name not in config: |
1571 | 1574 | |
1572 | 1575 | return JinjaOidcMappingConfig( |
1573 | 1576 | subject_claim=subject_claim, |
1577 | picture_claim=picture_claim, | |
1574 | 1578 | localpart_template=localpart_template, |
1575 | 1579 | display_name_template=display_name_template, |
1576 | 1580 | email_template=email_template, |
1610 | 1614 | if email: |
1611 | 1615 | emails.append(email) |
1612 | 1616 | |
1617 | picture = userinfo.get("picture") | |
1618 | ||
1613 | 1619 | return UserAttributeDict( |
1614 | 1620 | localpart=localpart, |
1615 | 1621 | display_name=display_name, |
1616 | 1622 | emails=emails, |
1623 | picture=picture, | |
1617 | 1624 | confirm_localpart=self._config.confirm_localpart, |
1618 | 1625 | ) |
1619 | 1626 |
447 | 447 | |
448 | 448 | if pagin_config.from_token: |
449 | 449 | from_token = pagin_config.from_token |
450 | elif pagin_config.direction == "f": | |
451 | from_token = ( | |
452 | await self.hs.get_event_sources().get_start_token_for_pagination( | |
453 | room_id | |
454 | ) | |
455 | ) | |
450 | 456 | else: |
451 | 457 | from_token = ( |
452 | 458 | await self.hs.get_event_sources().get_current_token_for_pagination( |
200 | 200 | """Get the current presence state for multiple users. |
201 | 201 | |
202 | 202 | Returns: |
203 | dict: `user_id` -> `UserPresenceState` | |
203 | A mapping of `user_id` -> `UserPresenceState` | |
204 | 204 | """ |
205 | 205 | states = {} |
206 | 206 | missing = [] |
477 | 477 | return _NullContextManager() |
478 | 478 | |
479 | 479 | prev_state = await self.current_state_for_user(user_id) |
480 | if prev_state != PresenceState.BUSY: | |
480 | if prev_state.state != PresenceState.BUSY: | |
481 | 481 | # We set state here but pass ignore_status_msg = True as we don't want to |
482 | 482 | # cause the status message to be cleared. |
483 | 483 | # Note that this causes last_active_ts to be incremented which is not |
91 | 91 | continue |
92 | 92 | |
93 | 93 | # Check if these receipts apply to a thread. |
94 | thread_id = None | |
95 | 94 | data = user_values.get("data", {}) |
96 | 95 | thread_id = data.get("thread_id") |
97 | 96 | # If the thread ID is invalid, consider it missing. |
37 | 37 | ) |
38 | 38 | from synapse.appservice import ApplicationService |
39 | 39 | from synapse.config.server import is_threepid_reserved |
40 | from synapse.handlers.device import DeviceHandler | |
40 | 41 | from synapse.http.servlet import assert_params_in_dict |
41 | 42 | from synapse.replication.http.login import RegisterDeviceReplicationServlet |
42 | 43 | from synapse.replication.http.register import ( |
840 | 841 | refresh_token = None |
841 | 842 | refresh_token_id = None |
842 | 843 | |
844 | # This can only run on the main process. | |
845 | assert isinstance(self.device_handler, DeviceHandler) | |
846 | ||
843 | 847 | registered_device_id = await self.device_handler.check_device_registered( |
844 | 848 | user_id, |
845 | 849 | device_id, |
12 | 12 | # limitations under the License. |
13 | 13 | import enum |
14 | 14 | import logging |
15 | from typing import TYPE_CHECKING, Dict, FrozenSet, Iterable, List, Optional, Tuple | |
15 | from typing import TYPE_CHECKING, Collection, Dict, FrozenSet, Iterable, List, Optional | |
16 | 16 | |
17 | 17 | import attr |
18 | 18 | |
19 | 19 | from synapse.api.constants import EventTypes, RelationTypes |
20 | 20 | from synapse.api.errors import SynapseError |
21 | 21 | from synapse.events import EventBase, relation_from_event |
22 | from synapse.logging.context import make_deferred_yieldable, run_in_background | |
22 | 23 | from synapse.logging.opentracing import trace |
23 | 24 | from synapse.storage.databases.main.relations import ThreadsNextBatch, _RelatedEvent |
24 | 25 | from synapse.streams.config import PaginationConfig |
25 | from synapse.types import JsonDict, Requester, StreamToken, UserID | |
26 | from synapse.types import JsonDict, Requester, UserID | |
27 | from synapse.util.async_helpers import gather_results | |
26 | 28 | from synapse.visibility import filter_events_for_client |
27 | 29 | |
28 | 30 | if TYPE_CHECKING: |
170 | 172 | ) |
171 | 173 | |
172 | 174 | return return_value |
173 | ||
174 | async def get_relations_for_event( | |
175 | self, | |
176 | event_id: str, | |
177 | event: EventBase, | |
178 | room_id: str, | |
179 | relation_type: str, | |
180 | ignored_users: FrozenSet[str] = frozenset(), | |
181 | ) -> Tuple[List[_RelatedEvent], Optional[StreamToken]]: | |
182 | """Get a list of events which relate to an event, ordered by topological ordering. | |
183 | ||
184 | Args: | |
185 | event_id: Fetch events that relate to this event ID. | |
186 | event: The matching EventBase to event_id. | |
187 | room_id: The room the event belongs to. | |
188 | relation_type: The type of relation. | |
189 | ignored_users: The users ignored by the requesting user. | |
190 | ||
191 | Returns: | |
192 | List of event IDs that match relations requested. The rows are of | |
193 | the form `{"event_id": "..."}`. | |
194 | """ | |
195 | ||
196 | # Call the underlying storage method, which is cached. | |
197 | related_events, next_token = await self._main_store.get_relations_for_event( | |
198 | event_id, event, room_id, relation_type, direction="f" | |
199 | ) | |
200 | ||
201 | # Filter out ignored users and convert to the expected format. | |
202 | related_events = [ | |
203 | event for event in related_events if event.sender not in ignored_users | |
204 | ] | |
205 | ||
206 | return related_events, next_token | |
207 | 175 | |
208 | 176 | async def redact_events_related_to( |
209 | 177 | self, |
258 | 226 | e.msg, |
259 | 227 | ) |
260 | 228 | |
261 | async def get_annotations_for_event( | |
262 | self, | |
263 | event_id: str, | |
264 | room_id: str, | |
265 | limit: int = 5, | |
266 | ignored_users: FrozenSet[str] = frozenset(), | |
267 | ) -> List[JsonDict]: | |
268 | """Get a list of annotations on the event, grouped by event type and | |
229 | async def get_annotations_for_events( | |
230 | self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() | |
231 | ) -> Dict[str, List[JsonDict]]: | |
232 | """Get a list of annotations to the given events, grouped by event type and | |
269 | 233 | aggregation key, sorted by count. |
270 | 234 | |
271 | This is used e.g. to get the what and how many reactions have happend | |
235 | This is used e.g. to get the what and how many reactions have happened | |
272 | 236 | on an event. |
273 | 237 | |
274 | 238 | Args: |
275 | event_id: Fetch events that relate to this event ID. | |
276 | room_id: The room the event belongs to. | |
277 | limit: Only fetch the `limit` groups. | |
239 | event_ids: Fetch events that relate to these event IDs. | |
278 | 240 | ignored_users: The users ignored by the requesting user. |
279 | 241 | |
280 | 242 | Returns: |
281 | List of groups of annotations that match. Each row is a dict with | |
282 | `type`, `key` and `count` fields. | |
243 | A map of event IDs to a list of groups of annotations that match. | |
244 | Each entry is a dict with `type`, `key` and `count` fields. | |
283 | 245 | """ |
284 | 246 | # Get the base results for all users. |
285 | full_results = await self._main_store.get_aggregation_groups_for_event( | |
286 | event_id, room_id, limit | |
287 | ) | |
247 | full_results = await self._main_store.get_aggregation_groups_for_events( | |
248 | event_ids | |
249 | ) | |
250 | ||
251 | # Avoid additional logic if there are no ignored users. | |
252 | if not ignored_users: | |
253 | return { | |
254 | event_id: results | |
255 | for event_id, results in full_results.items() | |
256 | if results | |
257 | } | |
288 | 258 | |
289 | 259 | # Then subtract off the results for any ignored users. |
290 | 260 | ignored_results = await self._main_store.get_aggregation_groups_for_users( |
291 | event_id, room_id, limit, ignored_users | |
292 | ) | |
293 | ||
294 | filtered_results = [] | |
295 | for result in full_results: | |
296 | key = (result["type"], result["key"]) | |
297 | if key in ignored_results: | |
298 | result = result.copy() | |
299 | result["count"] -= ignored_results[key] | |
300 | if result["count"] <= 0: | |
301 | continue | |
302 | filtered_results.append(result) | |
261 | [event_id for event_id, results in full_results.items() if results], | |
262 | ignored_users, | |
263 | ) | |
264 | ||
265 | filtered_results = {} | |
266 | for event_id, results in full_results.items(): | |
267 | # If no annotations, skip. | |
268 | if not results: | |
269 | continue | |
270 | ||
271 | # If there are not ignored results for this event, copy verbatim. | |
272 | if event_id not in ignored_results: | |
273 | filtered_results[event_id] = results | |
274 | continue | |
275 | ||
276 | # Otherwise, subtract out the ignored results. | |
277 | event_ignored_results = ignored_results[event_id] | |
278 | for result in results: | |
279 | key = (result["type"], result["key"]) | |
280 | if key in event_ignored_results: | |
281 | # Ensure to not modify the cache. | |
282 | result = result.copy() | |
283 | result["count"] -= event_ignored_results[key] | |
284 | if result["count"] <= 0: | |
285 | continue | |
286 | filtered_results.setdefault(event_id, []).append(result) | |
303 | 287 | |
304 | 288 | return filtered_results |
289 | ||
290 | async def get_references_for_events( | |
291 | self, event_ids: Collection[str], ignored_users: FrozenSet[str] = frozenset() | |
292 | ) -> Dict[str, List[_RelatedEvent]]: | |
293 | """Get a list of references to the given events. | |
294 | ||
295 | Args: | |
296 | event_ids: Fetch events that relate to this event ID. | |
297 | ignored_users: The users ignored by the requesting user. | |
298 | ||
299 | Returns: | |
300 | A map of event IDs to a list related events. | |
301 | """ | |
302 | ||
303 | related_events = await self._main_store.get_references_for_events(event_ids) | |
304 | ||
305 | # Avoid additional logic if there are no ignored users. | |
306 | if not ignored_users: | |
307 | return { | |
308 | event_id: results | |
309 | for event_id, results in related_events.items() | |
310 | if results | |
311 | } | |
312 | ||
313 | # Filter out ignored users. | |
314 | results = {} | |
315 | for event_id, events in related_events.items(): | |
316 | # If no references, skip. | |
317 | if not events: | |
318 | continue | |
319 | ||
320 | # Filter ignored users out. | |
321 | events = [event for event in events if event.sender not in ignored_users] | |
322 | # If there are no events left, skip this event. | |
323 | if not events: | |
324 | continue | |
325 | ||
326 | results[event_id] = events | |
327 | ||
328 | return results | |
305 | 329 | |
306 | 330 | async def _get_threads_for_events( |
307 | 331 | self, |
365 | 389 | results = {} |
366 | 390 | |
367 | 391 | for event_id, summary in summaries.items(): |
368 | if summary: | |
369 | thread_count, latest_thread_event = summary | |
370 | ||
371 | # Subtract off the count of any ignored users. | |
372 | for ignored_user in ignored_users: | |
373 | thread_count -= ignored_results.get((event_id, ignored_user), 0) | |
374 | ||
375 | # This is gnarly, but if the latest event is from an ignored user, | |
376 | # attempt to find one that isn't from an ignored user. | |
377 | if latest_thread_event.sender in ignored_users: | |
378 | room_id = latest_thread_event.room_id | |
379 | ||
380 | # If the root event is not found, something went wrong, do | |
381 | # not include a summary of the thread. | |
382 | event = await self._event_handler.get_event(user, room_id, event_id) | |
383 | if event is None: | |
384 | continue | |
385 | ||
386 | potential_events, _ = await self.get_relations_for_event( | |
387 | event_id, | |
388 | event, | |
389 | room_id, | |
390 | RelationTypes.THREAD, | |
391 | ignored_users, | |
392 | # If no thread, skip. | |
393 | if not summary: | |
394 | continue | |
395 | ||
396 | thread_count, latest_thread_event = summary | |
397 | ||
398 | # Subtract off the count of any ignored users. | |
399 | for ignored_user in ignored_users: | |
400 | thread_count -= ignored_results.get((event_id, ignored_user), 0) | |
401 | ||
402 | # This is gnarly, but if the latest event is from an ignored user, | |
403 | # attempt to find one that isn't from an ignored user. | |
404 | if latest_thread_event.sender in ignored_users: | |
405 | room_id = latest_thread_event.room_id | |
406 | ||
407 | # If the root event is not found, something went wrong, do | |
408 | # not include a summary of the thread. | |
409 | event = await self._event_handler.get_event(user, room_id, event_id) | |
410 | if event is None: | |
411 | continue | |
412 | ||
413 | # Attempt to find another event to use as the latest event. | |
414 | potential_events, _ = await self._main_store.get_relations_for_event( | |
415 | event_id, event, room_id, RelationTypes.THREAD, direction="f" | |
416 | ) | |
417 | ||
418 | # Filter out ignored users. | |
419 | potential_events = [ | |
420 | event | |
421 | for event in potential_events | |
422 | if event.sender not in ignored_users | |
423 | ] | |
424 | ||
425 | # If all found events are from ignored users, do not include | |
426 | # a summary of the thread. | |
427 | if not potential_events: | |
428 | continue | |
429 | ||
430 | # The *last* event returned is the one that is cared about. | |
431 | event = await self._event_handler.get_event( | |
432 | user, room_id, potential_events[-1].event_id | |
433 | ) | |
434 | # It is unexpected that the event will not exist. | |
435 | if event is None: | |
436 | logger.warning( | |
437 | "Unable to fetch latest event in a thread with event ID: %s", | |
438 | potential_events[-1].event_id, | |
392 | 439 | ) |
393 | ||
394 | # If all found events are from ignored users, do not include | |
395 | # a summary of the thread. | |
396 | if not potential_events: | |
397 | continue | |
398 | ||
399 | # The *last* event returned is the one that is cared about. | |
400 | event = await self._event_handler.get_event( | |
401 | user, room_id, potential_events[-1].event_id | |
402 | ) | |
403 | # It is unexpected that the event will not exist. | |
404 | if event is None: | |
405 | logger.warning( | |
406 | "Unable to fetch latest event in a thread with event ID: %s", | |
407 | potential_events[-1].event_id, | |
408 | ) | |
409 | continue | |
410 | latest_thread_event = event | |
411 | ||
412 | results[event_id] = _ThreadAggregation( | |
413 | latest_event=latest_thread_event, | |
414 | count=thread_count, | |
415 | # If there's a thread summary it must also exist in the | |
416 | # participated dictionary. | |
417 | current_user_participated=events_by_id[event_id].sender == user_id | |
418 | or participated[event_id], | |
419 | ) | |
440 | continue | |
441 | latest_thread_event = event | |
442 | ||
443 | results[event_id] = _ThreadAggregation( | |
444 | latest_event=latest_thread_event, | |
445 | count=thread_count, | |
446 | # If there's a thread summary it must also exist in the | |
447 | # participated dictionary. | |
448 | current_user_participated=events_by_id[event_id].sender == user_id | |
449 | or participated[event_id], | |
450 | ) | |
420 | 451 | |
421 | 452 | return results |
422 | 453 | |
495 | 526 | # (as that is what makes it part of the thread). |
496 | 527 | relations_by_id[latest_thread_event.event_id] = RelationTypes.THREAD |
497 | 528 | |
498 | # Fetch other relations per event. | |
499 | for event in events_by_id.values(): | |
500 | # Fetch any annotations (ie, reactions) to bundle with this event. | |
501 | annotations = await self.get_annotations_for_event( | |
502 | event.event_id, event.room_id, ignored_users=ignored_users | |
503 | ) | |
504 | if annotations: | |
505 | results.setdefault( | |
506 | event.event_id, BundledAggregations() | |
507 | ).annotations = {"chunk": annotations} | |
508 | ||
509 | # Fetch any references to bundle with this event. | |
510 | references, next_token = await self.get_relations_for_event( | |
511 | event.event_id, | |
512 | event, | |
513 | event.room_id, | |
514 | RelationTypes.REFERENCE, | |
515 | ignored_users=ignored_users, | |
516 | ) | |
517 | if references: | |
518 | aggregations = results.setdefault(event.event_id, BundledAggregations()) | |
519 | aggregations.references = { | |
520 | "chunk": [{"event_id": ev.event_id} for ev in references] | |
521 | } | |
522 | ||
523 | if next_token: | |
524 | aggregations.references["next_batch"] = await next_token.to_string( | |
525 | self._main_store | |
526 | ) | |
527 | ||
528 | # Fetch any edits (but not for redacted events). | |
529 | # | |
530 | # Note that there is no use in limiting edits by ignored users since the | |
531 | # parent event should be ignored in the first place if the user is ignored. | |
532 | edits = await self._main_store.get_applicable_edits( | |
533 | [ | |
534 | event_id | |
535 | for event_id, event in events_by_id.items() | |
536 | if not event.internal_metadata.is_redacted() | |
537 | ] | |
538 | ) | |
539 | for event_id, edit in edits.items(): | |
540 | results.setdefault(event_id, BundledAggregations()).replace = edit | |
529 | async def _fetch_annotations() -> None: | |
530 | """Fetch any annotations (ie, reactions) to bundle with this event.""" | |
531 | annotations_by_event_id = await self.get_annotations_for_events( | |
532 | events_by_id.keys(), ignored_users=ignored_users | |
533 | ) | |
534 | for event_id, annotations in annotations_by_event_id.items(): | |
535 | if annotations: | |
536 | results.setdefault(event_id, BundledAggregations()).annotations = { | |
537 | "chunk": annotations | |
538 | } | |
539 | ||
540 | async def _fetch_references() -> None: | |
541 | """Fetch any references to bundle with this event.""" | |
542 | references_by_event_id = await self.get_references_for_events( | |
543 | events_by_id.keys(), ignored_users=ignored_users | |
544 | ) | |
545 | for event_id, references in references_by_event_id.items(): | |
546 | if references: | |
547 | results.setdefault(event_id, BundledAggregations()).references = { | |
548 | "chunk": [{"event_id": ev.event_id} for ev in references] | |
549 | } | |
550 | ||
551 | async def _fetch_edits() -> None: | |
552 | """ | |
553 | Fetch any edits (but not for redacted events). | |
554 | ||
555 | Note that there is no use in limiting edits by ignored users since the | |
556 | parent event should be ignored in the first place if the user is ignored. | |
557 | """ | |
558 | edits = await self._main_store.get_applicable_edits( | |
559 | [ | |
560 | event_id | |
561 | for event_id, event in events_by_id.items() | |
562 | if not event.internal_metadata.is_redacted() | |
563 | ] | |
564 | ) | |
565 | for event_id, edit in edits.items(): | |
566 | results.setdefault(event_id, BundledAggregations()).replace = edit | |
567 | ||
568 | # Parallelize the calls for annotations, references, and edits since they | |
569 | # are unrelated. | |
570 | await make_deferred_yieldable( | |
571 | gather_results( | |
572 | ( | |
573 | run_in_background(_fetch_annotations), | |
574 | run_in_background(_fetch_references), | |
575 | run_in_background(_fetch_edits), | |
576 | ) | |
577 | ) | |
578 | ) | |
541 | 579 | |
542 | 580 | return results |
543 | 581 | |
570 | 608 | room_id, requester, allow_departed_users=True |
571 | 609 | ) |
572 | 610 | |
573 | # Note that ignored users are not passed into get_relations_for_event | |
611 | # Note that ignored users are not passed into get_threads | |
574 | 612 | # below. Ignored users are handled in filter_events_for_client (and by |
575 | 613 | # not passing them in here we should get a better cache hit rate). |
576 | 614 | thread_roots, next_batch = await self._main_store.get_threads( |
440 | 440 | client_redirect_url: where the client wants to redirect to |
441 | 441 | |
442 | 442 | Returns: |
443 | dict: A dict containing new user attributes. Possible keys: | |
443 | A dict containing new user attributes. Possible keys: | |
444 | 444 | * mxid_localpart (str): Required. The localpart of the user's mxid |
445 | 445 | * displayname (str): The displayname of the user |
446 | 446 | * emails (list[str]): Any emails for the user |
482 | 482 | Args: |
483 | 483 | config: A dictionary containing configuration options for this provider |
484 | 484 | Returns: |
485 | SamlConfig: A custom config object for this module | |
485 | A custom config object for this module | |
486 | 486 | """ |
487 | 487 | # Parse config options and use defaults where necessary |
488 | 488 | mxid_source_attribute = config.get("mxid_source_attribute", "uid") |
14 | 14 | from typing import TYPE_CHECKING, Optional |
15 | 15 | |
16 | 16 | from synapse.api.errors import Codes, StoreError, SynapseError |
17 | from synapse.handlers.device import DeviceHandler | |
17 | 18 | from synapse.types import Requester |
18 | 19 | |
19 | 20 | if TYPE_CHECKING: |
28 | 29 | def __init__(self, hs: "HomeServer"): |
29 | 30 | self.store = hs.get_datastores().main |
30 | 31 | self._auth_handler = hs.get_auth_handler() |
31 | self._device_handler = hs.get_device_handler() | |
32 | # This can only be instantiated on the main process. | |
33 | device_handler = hs.get_device_handler() | |
34 | assert isinstance(device_handler, DeviceHandler) | |
35 | self._device_handler = device_handler | |
32 | 36 | |
33 | 37 | async def set_password( |
34 | 38 | self, |
11 | 11 | # See the License for the specific language governing permissions and |
12 | 12 | # limitations under the License. |
13 | 13 | import abc |
14 | import hashlib | |
15 | import io | |
14 | 16 | import logging |
15 | 17 | from typing import ( |
16 | 18 | TYPE_CHECKING, |
36 | 38 | from synapse.api.constants import LoginType |
37 | 39 | from synapse.api.errors import Codes, NotFoundError, RedirectException, SynapseError |
38 | 40 | from synapse.config.sso import SsoAttributeRequirement |
41 | from synapse.handlers.device import DeviceHandler | |
39 | 42 | from synapse.handlers.register import init_counters_for_auth_provider |
40 | 43 | from synapse.handlers.ui_auth import UIAuthSessionDataConstants |
41 | 44 | from synapse.http import get_request_user_agent |
136 | 139 | localpart: Optional[str] |
137 | 140 | confirm_localpart: bool = False |
138 | 141 | display_name: Optional[str] = None |
142 | picture: Optional[str] = None | |
139 | 143 | emails: Collection[str] = attr.Factory(list) |
140 | 144 | |
141 | 145 | |
194 | 198 | self._error_template = hs.config.sso.sso_error_template |
195 | 199 | self._bad_user_template = hs.config.sso.sso_auth_bad_user_template |
196 | 200 | self._profile_handler = hs.get_profile_handler() |
201 | self._media_repo = ( | |
202 | hs.get_media_repository() if hs.config.media.can_load_media_repo else None | |
203 | ) | |
204 | self._http_client = hs.get_proxied_blacklisted_http_client() | |
197 | 205 | |
198 | 206 | # The following template is shown after a successful user interactive |
199 | 207 | # authentication session. It tells the user they can close the window. |
493 | 501 | await self._profile_handler.set_displayname( |
494 | 502 | user_id_obj, requester, attributes.display_name, True |
495 | 503 | ) |
504 | if attributes.picture: | |
505 | await self.set_avatar(user_id, attributes.picture) | |
496 | 506 | |
497 | 507 | await self._auth_handler.complete_sso_login( |
498 | 508 | user_id, |
701 | 711 | await self._store.record_user_external_id( |
702 | 712 | auth_provider_id, remote_user_id, registered_user_id |
703 | 713 | ) |
714 | ||
715 | # Set avatar, if available | |
716 | if attributes.picture: | |
717 | await self.set_avatar(registered_user_id, attributes.picture) | |
718 | ||
704 | 719 | return registered_user_id |
720 | ||
721 | async def set_avatar(self, user_id: str, picture_https_url: str) -> bool: | |
722 | """Set avatar of the user. | |
723 | ||
724 | This downloads the image file from the URL provided, stores that in | |
725 | the media repository and then sets the avatar on the user's profile. | |
726 | ||
727 | It can detect if the same image is being saved again and bails early by storing | |
728 | the hash of the file in the `upload_name` of the avatar image. | |
729 | ||
730 | Currently, it only supports server configurations which run the media repository | |
731 | within the same process. | |
732 | ||
733 | It silently fails and logs a warning by raising an exception and catching it | |
734 | internally if: | |
735 | * it is unable to fetch the image itself (non 200 status code) or | |
736 | * the image supplied is bigger than max allowed size or | |
737 | * the image type is not one of the allowed image types. | |
738 | ||
739 | Args: | |
740 | user_id: matrix user ID in the form @localpart:domain as a string. | |
741 | ||
742 | picture_https_url: HTTPS url for the picture image file. | |
743 | ||
744 | Returns: `True` if the user's avatar has been successfully set to the image at | |
745 | `picture_https_url`. | |
746 | """ | |
747 | if self._media_repo is None: | |
748 | logger.info( | |
749 | "failed to set user avatar because out-of-process media repositories " | |
750 | "are not supported yet " | |
751 | ) | |
752 | return False | |
753 | ||
754 | try: | |
755 | uid = UserID.from_string(user_id) | |
756 | ||
757 | def is_allowed_mime_type(content_type: str) -> bool: | |
758 | if ( | |
759 | self._profile_handler.allowed_avatar_mimetypes | |
760 | and content_type | |
761 | not in self._profile_handler.allowed_avatar_mimetypes | |
762 | ): | |
763 | return False | |
764 | return True | |
765 | ||
766 | # download picture, enforcing size limit & mime type check | |
767 | picture = io.BytesIO() | |
768 | ||
769 | content_length, headers, uri, code = await self._http_client.get_file( | |
770 | url=picture_https_url, | |
771 | output_stream=picture, | |
772 | max_size=self._profile_handler.max_avatar_size, | |
773 | is_allowed_content_type=is_allowed_mime_type, | |
774 | ) | |
775 | ||
776 | if code != 200: | |
777 | raise Exception( | |
778 | "GET request to download sso avatar image returned {}".format(code) | |
779 | ) | |
780 | ||
781 | # upload name includes hash of the image file's content so that we can | |
782 | # easily check if it requires an update or not, the next time user logs in | |
783 | upload_name = "sso_avatar_" + hashlib.sha256(picture.read()).hexdigest() | |
784 | ||
785 | # bail if user already has the same avatar | |
786 | profile = await self._profile_handler.get_profile(user_id) | |
787 | if profile["avatar_url"] is not None: | |
788 | server_name = profile["avatar_url"].split("/")[-2] | |
789 | media_id = profile["avatar_url"].split("/")[-1] | |
790 | if server_name == self._server_name: | |
791 | media = await self._media_repo.store.get_local_media(media_id) | |
792 | if media is not None and upload_name == media["upload_name"]: | |
793 | logger.info("skipping saving the user avatar") | |
794 | return True | |
795 | ||
796 | # store it in media repository | |
797 | avatar_mxc_url = await self._media_repo.create_content( | |
798 | media_type=headers[b"Content-Type"][0].decode("utf-8"), | |
799 | upload_name=upload_name, | |
800 | content=picture, | |
801 | content_length=content_length, | |
802 | auth_user=uid, | |
803 | ) | |
804 | ||
805 | # save it as user avatar | |
806 | await self._profile_handler.set_avatar_url( | |
807 | uid, | |
808 | create_requester(uid), | |
809 | str(avatar_mxc_url), | |
810 | ) | |
811 | ||
812 | logger.info("successfully saved the user avatar") | |
813 | return True | |
814 | except Exception: | |
815 | logger.warning("failed to save the user avatar") | |
816 | return False | |
705 | 817 | |
706 | 818 | async def complete_sso_ui_auth_request( |
707 | 819 | self, |
1034 | 1146 | ) -> None: |
1035 | 1147 | """Revoke any devices and in-flight logins tied to a provider session. |
1036 | 1148 | |
1149 | Can only be called from the main process. | |
1150 | ||
1037 | 1151 | Args: |
1038 | 1152 | auth_provider_id: A unique identifier for this SSO provider, e.g. |
1039 | 1153 | "oidc" or "saml". |
1041 | 1155 | expected_user_id: The user we're expecting to logout. If set, it will ignore |
1042 | 1156 | sessions belonging to other users and log an error. |
1043 | 1157 | """ |
1158 | ||
1159 | # It is expected that this is the main process. | |
1160 | assert isinstance( | |
1161 | self._device_handler, DeviceHandler | |
1162 | ), "revoking SSO sessions can only be called on the main process" | |
1163 | ||
1044 | 1164 | # Invalidate any running user-mapping sessions |
1045 | 1165 | to_delete = [] |
1046 | 1166 | for session_id, session in self._username_mapping_sessions.items(): |
1425 | 1425 | |
1426 | 1426 | logger.debug("Fetching OTK data") |
1427 | 1427 | device_id = sync_config.device_id |
1428 | one_time_key_counts: JsonDict = {} | |
1428 | one_time_keys_count: JsonDict = {} | |
1429 | 1429 | unused_fallback_key_types: List[str] = [] |
1430 | 1430 | if device_id: |
1431 | 1431 | # TODO: We should have a way to let clients differentiate between the states of: |
1432 | 1432 | # * no change in OTK count since the provided since token |
1433 | 1433 | # * the server has zero OTKs left for this device |
1434 | 1434 | # Spec issue: https://github.com/matrix-org/matrix-doc/issues/3298 |
1435 | one_time_key_counts = await self.store.count_e2e_one_time_keys( | |
1435 | one_time_keys_count = await self.store.count_e2e_one_time_keys( | |
1436 | 1436 | user_id, device_id |
1437 | 1437 | ) |
1438 | 1438 | unused_fallback_key_types = ( |
1462 | 1462 | archived=sync_result_builder.archived, |
1463 | 1463 | to_device=sync_result_builder.to_device, |
1464 | 1464 | device_lists=device_lists, |
1465 | device_one_time_keys_count=one_time_key_counts, | |
1465 | device_one_time_keys_count=one_time_keys_count, | |
1466 | 1466 | device_unused_fallback_key_types=unused_fallback_key_types, |
1467 | 1467 | next_batch=sync_result_builder.now_token, |
1468 | 1468 | ) |
44 | 44 | |
45 | 45 | Args: |
46 | 46 | hs: homeserver |
47 | handler ((twisted.web.server.Request) -> twisted.internet.defer.Deferred): | |
48 | function to be called to handle the request. | |
47 | handler: function to be called to handle the request. | |
49 | 48 | """ |
50 | 49 | super().__init__() |
51 | 50 | self._handler = handler |
154 | 154 | a file for a file upload). Or None if the request is to have |
155 | 155 | no body. |
156 | 156 | Returns: |
157 | Deferred[twisted.web.iweb.IResponse]: | |
158 | fires when the header of the response has been received (regardless of the | |
159 | response status code). Fails if there is any problem which prevents that | |
160 | response from being received (including problems that prevent the request | |
161 | from being sent). | |
157 | A deferred which fires when the header of the response has been received | |
158 | (regardless of the response status code). Fails if there is any problem | |
159 | which prevents that response from being received (including problems that | |
160 | prevent the request from being sent). | |
162 | 161 | """ |
163 | 162 | # We use urlparse as that will set `port` to None if there is no |
164 | 163 | # explicit port. |
950 | 950 | |
951 | 951 | args: query params |
952 | 952 | Returns: |
953 | dict|list: Succeeds when we get a 2xx HTTP response. The | |
954 | result will be the decoded JSON body. | |
953 | Succeeds when we get a 2xx HTTP response. The result will be the decoded JSON body. | |
955 | 954 | |
956 | 955 | Raises: |
957 | 956 | HttpResponseException: If we get an HTTP response code >= 300 |
33 | 33 | ) |
34 | 34 | from twisted.web.error import SchemeNotSupported |
35 | 35 | from twisted.web.http_headers import Headers |
36 | from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS | |
36 | from twisted.web.iweb import IAgent, IBodyProducer, IPolicyForHTTPS, IResponse | |
37 | 37 | |
38 | 38 | from synapse.http import redact_uri |
39 | 39 | from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint, ProxyCredentials |
133 | 133 | uri: bytes, |
134 | 134 | headers: Optional[Headers] = None, |
135 | 135 | bodyProducer: Optional[IBodyProducer] = None, |
136 | ) -> defer.Deferred: | |
136 | ) -> "defer.Deferred[IResponse]": | |
137 | 137 | """ |
138 | 138 | Issue a request to the server indicated by the given uri. |
139 | 139 | |
156 | 156 | a file upload). Or, None if the request is to have no body. |
157 | 157 | |
158 | 158 | Returns: |
159 | Deferred[IResponse]: completes when the header of the response has | |
160 | been received (regardless of the response status code). | |
161 | ||
162 | Can fail with: | |
163 | SchemeNotSupported: if the uri is not http or https | |
164 | ||
165 | twisted.internet.error.TimeoutError if the server we are connecting | |
166 | to (proxy or destination) does not accept a connection before | |
167 | connectTimeout. | |
168 | ||
169 | ... other things too. | |
159 | A deferred which completes when the header of the response has | |
160 | been received (regardless of the response status code). | |
161 | ||
162 | Can fail with: | |
163 | SchemeNotSupported: if the uri is not http or https | |
164 | ||
165 | twisted.internet.error.TimeoutError if the server we are connecting | |
166 | to (proxy or destination) does not accept a connection before | |
167 | connectTimeout. | |
168 | ||
169 | ... other things too. | |
170 | 170 | """ |
171 | 171 | uri = uri.strip() |
172 | 172 | if not _VALID_URI.match(uri): |
266 | 266 | request. The first argument will be the request object and |
267 | 267 | subsequent arguments will be any matched groups from the regex. |
268 | 268 | This should return either tuple of (code, response), or None. |
269 | servlet_classname (str): The name of the handler to be used in prometheus | |
269 | servlet_classname: The name of the handler to be used in prometheus | |
270 | 270 | and opentracing logs. |
271 | 271 | """ |
272 | 272 |
399 | 399 | be sure to call finished_processing. |
400 | 400 | |
401 | 401 | Args: |
402 | servlet_name (str): the name of the servlet which will be | |
402 | servlet_name: the name of the servlet which will be | |
403 | 403 | processing this request. This is used in the metrics. |
404 | 404 | |
405 | 405 | It is possible to update this afterwards by updating |
116 | 116 | """Create a new ContextResourceUsage |
117 | 117 | |
118 | 118 | Args: |
119 | copy_from (ContextResourceUsage|None): if not None, an object to | |
120 | copy stats from | |
119 | copy_from: if not None, an object to copy stats from | |
121 | 120 | """ |
122 | 121 | if copy_from is None: |
123 | 122 | self.reset() |
161 | 160 | """Add another ContextResourceUsage's stats to this one's. |
162 | 161 | |
163 | 162 | Args: |
164 | other (ContextResourceUsage): the other resource usage object | |
163 | other: the other resource usage object | |
165 | 164 | """ |
166 | 165 | self.ru_utime += other.ru_utime |
167 | 166 | self.ru_stime += other.ru_stime |
341 | 340 | called directly. |
342 | 341 | |
343 | 342 | Returns: |
344 | LoggingContext: the current logging context | |
343 | The current logging context | |
345 | 344 | """ |
346 | 345 | warnings.warn( |
347 | 346 | "synapse.logging.context.LoggingContext.current_context() is deprecated " |
361 | 360 | called directly. |
362 | 361 | |
363 | 362 | Args: |
364 | context(LoggingContext): The context to activate. | |
363 | context: The context to activate. | |
364 | ||
365 | 365 | Returns: |
366 | 366 | The context that was previously active |
367 | 367 | """ |
473 | 473 | """Get resources used by this logcontext so far. |
474 | 474 | |
475 | 475 | Returns: |
476 | ContextResourceUsage: a *copy* of the object tracking resource | |
477 | usage so far | |
476 | A *copy* of the object tracking resource usage so far | |
478 | 477 | """ |
479 | 478 | # we always return a copy, for consistency |
480 | 479 | res = self._resource_usage.copy() |
662 | 661 | def set_current_context(context: LoggingContextOrSentinel) -> LoggingContextOrSentinel: |
663 | 662 | """Set the current logging context in thread local storage |
664 | 663 | Args: |
665 | context(LoggingContext): The context to activate. | |
664 | context: The context to activate. | |
665 | ||
666 | 666 | Returns: |
667 | 667 | The context that was previously active |
668 | 668 | """ |
699 | 699 | suffix: suffix to add to the parent context's 'name'. |
700 | 700 | |
701 | 701 | Returns: |
702 | LoggingContext: new logging context. | |
702 | A new logging context. | |
703 | 703 | """ |
704 | 704 | curr_context = current_context() |
705 | 705 | if not curr_context: |
897 | 897 | on it. |
898 | 898 | |
899 | 899 | Args: |
900 | reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread | |
901 | the Deferred will be invoked, and whose threadpool we should use for the | |
902 | function. | |
900 | reactor: The reactor in whose main thread the Deferred will be invoked, | |
901 | and whose threadpool we should use for the function. | |
903 | 902 | |
904 | 903 | Normally this will be hs.get_reactor(). |
905 | 904 | |
906 | f (callable): The function to call. | |
905 | f: The function to call. | |
907 | 906 | |
908 | 907 | args: positional arguments to pass to f. |
909 | 908 | |
910 | 909 | kwargs: keyword arguments to pass to f. |
911 | 910 | |
912 | 911 | Returns: |
913 | Deferred: A Deferred which fires a callback with the result of `f`, or an | |
912 | A Deferred which fires a callback with the result of `f`, or an | |
914 | 913 | errback if `f` throws an exception. |
915 | 914 | """ |
916 | 915 | return defer_to_threadpool(reactor, reactor.getThreadPool(), f, *args, **kwargs) |
938 | 937 | on it. |
939 | 938 | |
940 | 939 | Args: |
941 | reactor (twisted.internet.base.ReactorBase): The reactor in whose main thread | |
942 | the Deferred will be invoked. Normally this will be hs.get_reactor(). | |
943 | ||
944 | threadpool (twisted.python.threadpool.ThreadPool): The threadpool to use for | |
945 | running `f`. Normally this will be hs.get_reactor().getThreadPool(). | |
946 | ||
947 | f (callable): The function to call. | |
940 | reactor: The reactor in whose main thread the Deferred will be invoked. | |
941 | Normally this will be hs.get_reactor(). | |
942 | ||
943 | threadpool: The threadpool to use for running `f`. Normally this will be | |
944 | hs.get_reactor().getThreadPool(). | |
945 | ||
946 | f: The function to call. | |
948 | 947 | |
949 | 948 | args: positional arguments to pass to f. |
950 | 949 | |
951 | 950 | kwargs: keyword arguments to pass to f. |
952 | 951 | |
953 | 952 | Returns: |
954 | Deferred: A Deferred which fires a callback with the result of `f`, or an | |
953 | A Deferred which fires a callback with the result of `f`, or an | |
955 | 954 | errback if `f` throws an exception. |
956 | 955 | """ |
957 | 956 | curr_context = current_context() |
720 | 720 | destination: address of entity receiving the span context. Must be given unless |
721 | 721 | check_destination is False. The context will only be injected if the |
722 | 722 | destination matches the opentracing whitelist |
723 | check_destination (bool): If false, destination will be ignored and the context | |
723 | check_destination: If false, destination will be ignored and the context | |
724 | 724 | will always be injected. |
725 | 725 | |
726 | 726 | Note: |
779 | 779 | destination: the name of the remote server. |
780 | 780 | |
781 | 781 | Returns: |
782 | dict: the active span's context if opentracing is enabled, otherwise empty. | |
782 | the active span's context if opentracing is enabled, otherwise empty. | |
783 | 783 | """ |
784 | 784 | |
785 | 785 | if destination and not whitelisted_homeserver(destination): |
46 | 46 | # This module is imported for its side effects; flake8 needn't warn that it's unused. |
47 | 47 | import synapse.metrics._reactor_metrics # noqa: F401 |
48 | 48 | from synapse.metrics._gc import MIN_TIME_BETWEEN_GCS, install_gc_manager |
49 | from synapse.metrics._legacy_exposition import ( | |
50 | MetricsResource, | |
51 | generate_latest, | |
52 | start_http_server, | |
53 | ) | |
49 | from synapse.metrics._twisted_exposition import MetricsResource, generate_latest | |
54 | 50 | from synapse.metrics._types import Collector |
55 | 51 | from synapse.util import SYNAPSE_VERSION |
56 | 52 | |
473 | 469 | "Collector", |
474 | 470 | "MetricsResource", |
475 | 471 | "generate_latest", |
476 | "start_http_server", | |
477 | 472 | "LaterGauge", |
478 | 473 | "InFlightGauge", |
479 | 474 | "GaugeBucketCollector", |
0 | # Copyright 2015-2019 Prometheus Python Client Developers | |
1 | # Copyright 2019 Matrix.org Foundation C.I.C. | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | """ | |
16 | This code is based off `prometheus_client/exposition.py` from version 0.7.1. | |
17 | ||
18 | Due to the renaming of metrics in prometheus_client 0.4.0, this customised | |
19 | vendoring of the code will emit both the old versions that Synapse dashboards | |
20 | expect, and the newer "best practice" version of the up-to-date official client. | |
21 | """ | |
22 | import logging | |
23 | import math | |
24 | import threading | |
25 | from http.server import BaseHTTPRequestHandler, HTTPServer | |
26 | from socketserver import ThreadingMixIn | |
27 | from typing import Any, Dict, List, Type, Union | |
28 | from urllib.parse import parse_qs, urlparse | |
29 | ||
30 | from prometheus_client import REGISTRY, CollectorRegistry | |
31 | from prometheus_client.core import Sample | |
32 | ||
33 | from twisted.web.resource import Resource | |
34 | from twisted.web.server import Request | |
35 | ||
36 | logger = logging.getLogger(__name__) | |
37 | CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8" | |
38 | ||
39 | ||
def floatToGoString(d: Union[int, float]) -> str:
    """Render a number the way Go's strconv formats floats for Prometheus.

    Infinities and NaN use Go's spellings ("+Inf"/"-Inf"/"NaN"); large
    finite positive values switch to exponent notation sooner than
    Python's repr would.
    """
    value = float(d)
    if value == math.inf:
        return "+Inf"
    if value == -math.inf:
        return "-Inf"
    if math.isnan(value):
        return "NaN"
    text = repr(value)
    dot = text.find(".")
    # Go switches to exponents sooner than Python.
    # We only need to care about positive values for le/quantile.
    if value > 0 and dot > 6:
        digits = (text[0] + "." + text[1:dot] + text[dot + 1 :]).rstrip("0.")
        return f"{digits}e+0{dot - 1}"
    return text
57 | ||
58 | ||
def sample_line(line: Sample, name: str) -> str:
    """Format one sample in the Prometheus text exposition format.

    Produces ``name{labels} value timestamp`` followed by a newline,
    escaping backslashes, newlines and double quotes in label values.
    Labels are emitted in sorted key order; the timestamp, if present,
    is converted to milliseconds.
    """
    labelstr = ""
    if line.labels:
        pairs = []
        for key, value in sorted(line.labels.items()):
            escaped = (
                value.replace("\\", r"\\").replace("\n", r"\n").replace('"', r"\"")
            )
            pairs.append(f'{key}="{escaped}"')
        labelstr = "{" + ",".join(pairs) + "}"
    timestamp = ""
    if line.timestamp is not None:
        # Convert to milliseconds.
        timestamp = f" {int(float(line.timestamp) * 1000):d}"
    return f"{name}{labelstr} {floatToGoString(line.value)}{timestamp}\n"
79 | ||
80 | ||
# Mapping from new metric names to legacy metric names.
# We translate these back to their old names when exposing them through our
# legacy vendored exporter.
# Only this legacy exposition module applies these name changes.
# Note that the legacy names contain ":" characters, which modern
# prometheus-client metric names do not.
LEGACY_METRIC_NAMES = {
    "synapse_util_caches_cache_hits": "synapse_util_caches_cache:hits",
    "synapse_util_caches_cache_size": "synapse_util_caches_cache:size",
    "synapse_util_caches_cache_evicted_size": "synapse_util_caches_cache:evicted_size",
    "synapse_util_caches_cache": "synapse_util_caches_cache:total",
    "synapse_util_caches_response_cache_size": "synapse_util_caches_response_cache:size",
    "synapse_util_caches_response_cache_hits": "synapse_util_caches_response_cache:hits",
    "synapse_util_caches_response_cache_evicted_size": "synapse_util_caches_response_cache:evicted_size",
    "synapse_util_caches_response_cache": "synapse_util_caches_response_cache:total",
    "synapse_federation_client_sent_pdu_destinations": "synapse_federation_client_sent_pdu_destinations:total",
    "synapse_federation_client_sent_pdu_destinations_count": "synapse_federation_client_sent_pdu_destinations:count",
    "synapse_admin_mau_current": "synapse_admin_mau:current",
    "synapse_admin_mau_max": "synapse_admin_mau:max",
    "synapse_admin_mau_registered_reserved_users": "synapse_admin_mau:registered_reserved_users",
}
100 | ||
101 | ||
def generate_latest(registry: CollectorRegistry, emit_help: bool = False) -> bytes:
    """
    Generate metrics in legacy format. Modern metrics are generated directly
    by prometheus-client.

    For each metric collected from `registry`, this emits the samples twice:
    once under the legacy (colon-containing) name from LEGACY_METRIC_NAMES,
    and once under the modern name — skipped when the two are identical.
    OpenMetrics-specific samples (`_created`, `_gsum`, `_gcount`) are
    collected separately and appended as gauges.

    Args:
        registry: the prometheus registry to collect metrics from.
        emit_help: whether to include `# HELP` lines in the output.

    Returns:
        The UTF-8 encoded exposition text.
    """

    output = []

    for metric in registry.collect():
        if not metric.samples:
            # No samples, don't bother.
            continue

        # Translate to legacy metric name if it has one.
        mname = LEGACY_METRIC_NAMES.get(metric.name, metric.name)
        mnewname = metric.name
        mtype = metric.type

        # OpenMetrics -> Prometheus
        if mtype == "counter":
            mnewname = mnewname + "_total"
        elif mtype == "info":
            mtype = "gauge"
            mnewname = mnewname + "_info"
        elif mtype == "stateset":
            mtype = "gauge"
        elif mtype == "gaugehistogram":
            mtype = "histogram"
        elif mtype == "unknown":
            mtype = "untyped"

        # Output in the old format for compatibility.
        if emit_help:
            output.append(
                "# HELP {} {}\n".format(
                    mname,
                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                )
            )
        output.append(f"# TYPE {mname} {mtype}\n")

        # Samples matching an OpenMetrics-only suffix are buffered here and
        # emitted as gauges after the main sample lines.
        om_samples: Dict[str, List[str]] = {}
        for s in metric.samples:
            for suffix in ["_created", "_gsum", "_gcount"]:
                if s.name == mname + suffix:
                    # OpenMetrics specific sample, put in a gauge at the end.
                    # (these come from gaugehistograms which don't get renamed,
                    # so no need to faff with mnewname)
                    om_samples.setdefault(suffix, []).append(sample_line(s, s.name))
                    break
            else:
                # Emit the sample under the legacy name, dropping the
                # modern "_total" counter suffix for colon-style names.
                newname = s.name.replace(mnewname, mname)
                if ":" in newname and newname.endswith("_total"):
                    newname = newname[: -len("_total")]
                output.append(sample_line(s, newname))

        for suffix, lines in sorted(om_samples.items()):
            if emit_help:
                output.append(
                    "# HELP {}{} {}\n".format(
                        mname,
                        suffix,
                        metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                    )
                )
            output.append(f"# TYPE {mname}{suffix} gauge\n")
            output.extend(lines)

        # Get rid of the weird colon things while we're at it
        if mtype == "counter":
            mnewname = mnewname.replace(":total", "")
        mnewname = mnewname.replace(":", "_")

        # If the legacy and modern names are the same there is nothing more
        # to emit for this metric.
        if mname == mnewname:
            continue

        # Also output in the new format, if it's different.
        if emit_help:
            output.append(
                "# HELP {} {}\n".format(
                    mnewname,
                    metric.documentation.replace("\\", r"\\").replace("\n", r"\n"),
                )
            )
        output.append(f"# TYPE {mnewname} {mtype}\n")

        for s in metric.samples:
            # Get rid of the OpenMetrics specific samples (we should already have
            # dealt with them above anyway.)
            for suffix in ["_created", "_gsum", "_gcount"]:
                if s.name == mname + suffix:
                    break
            else:
                sample_name = LEGACY_METRIC_NAMES.get(s.name, s.name)
                output.append(
                    sample_line(s, sample_name.replace(":total", "").replace(":", "_"))
                )

    return "".join(output).encode("utf-8")
201 | ||
202 | ||
class MetricsHandler(BaseHTTPRequestHandler):
    """HTTP handler that gives metrics from ``REGISTRY``."""

    # Default registry to expose; ``factory`` builds subclasses bound to a
    # different registry.
    registry = REGISTRY

    def do_GET(self) -> None:
        # Serve the metrics page. A "help" query parameter makes the output
        # include `# HELP` lines.
        registry = self.registry
        params = parse_qs(urlparse(self.path).query)

        if "help" in params:
            emit_help = True
        else:
            emit_help = False

        try:
            output = generate_latest(registry, emit_help=emit_help)
        except Exception:
            # Report the failure to the client, but re-raise so it is also
            # surfaced server-side.
            self.send_error(500, "error generating metric output")
            raise
        try:
            self.send_response(200)
            self.send_header("Content-Type", CONTENT_TYPE_LATEST)
            self.send_header("Content-Length", str(len(output)))
            self.end_headers()
            self.wfile.write(output)
        except BrokenPipeError as e:
            # The scraper hung up before we finished writing; not fatal,
            # so just log a warning.
            logger.warning(
                "BrokenPipeError when serving metrics (%s). Did Prometheus restart?", e
            )

    def log_message(self, format: str, *args: Any) -> None:
        """Log nothing."""

    @classmethod
    def factory(cls, registry: CollectorRegistry) -> Type:
        """Returns a dynamic MetricsHandler class tied
        to the passed registry.
        """
        # This implementation relies on MetricsHandler.registry
        # (defined above and defaulted to REGISTRY).

        # As we have unicode_literals, we need to create a str()
        # object for type().
        cls_name = str(cls.__name__)
        MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry})
        return MyMetricsHandler
249 | ||
250 | ||
251 | class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer): | |
252 | """Thread per request HTTP server.""" | |
253 | ||
254 | # Make worker threads "fire and forget". Beginning with Python 3.7 this | |
255 | # prevents a memory leak because ``ThreadingMixIn`` starts to gather all | |
256 | # non-daemon threads in a list in order to join on them at server close. | |
257 | # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the | |
258 | # same as Python 3.7's ``ThreadingHTTPServer``. | |
259 | daemon_threads = True | |
260 | ||
261 | ||
def start_http_server(
    port: int, addr: str = "", registry: CollectorRegistry = REGISTRY
) -> None:
    """Starts an HTTP server for prometheus metrics as a daemon thread"""
    handler_class = MetricsHandler.factory(registry)
    server = _ThreadingSimpleServer((addr, port), handler_class)
    worker = threading.Thread(target=server.serve_forever, daemon=True)
    worker.start()
271 | ||
272 | ||
class MetricsResource(Resource):
    """
    Twisted ``Resource`` that serves prometheus metrics.
    """

    # This resource consumes the whole remaining URL path.
    isLeaf = True

    def __init__(self, registry: CollectorRegistry = REGISTRY):
        self.registry = registry

    def render_GET(self, request: Request) -> bytes:
        # Render the exposition body first so we can set Content-Length.
        body = generate_latest(self.registry)
        request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii"))
        request.setHeader(b"Content-Length", str(len(body)))
        return body
0 | # Copyright 2015-2019 Prometheus Python Client Developers | |
1 | # Copyright 2019 Matrix.org Foundation C.I.C. | |
2 | # | |
3 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
4 | # you may not use this file except in compliance with the License. | |
5 | # You may obtain a copy of the License at | |
6 | # | |
7 | # http://www.apache.org/licenses/LICENSE-2.0 | |
8 | # | |
9 | # Unless required by applicable law or agreed to in writing, software | |
10 | # distributed under the License is distributed on an "AS IS" BASIS, | |
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
12 | # See the License for the specific language governing permissions and | |
13 | # limitations under the License. | |
14 | ||
15 | from prometheus_client import REGISTRY, CollectorRegistry, generate_latest | |
16 | ||
17 | from twisted.web.resource import Resource | |
18 | from twisted.web.server import Request | |
19 | ||
20 | CONTENT_TYPE_LATEST = "text/plain; version=0.0.4; charset=utf-8" | |
21 | ||
22 | ||
23 | class MetricsResource(Resource): | |
24 | """ | |
25 | Twisted ``Resource`` that serves prometheus metrics. | |
26 | """ | |
27 | ||
28 | isLeaf = True | |
29 | ||
30 | def __init__(self, registry: CollectorRegistry = REGISTRY): | |
31 | self.registry = registry | |
32 | ||
33 | def render_GET(self, request: Request) -> bytes: | |
34 | request.setHeader(b"Content-Type", CONTENT_TYPE_LATEST.encode("ascii")) | |
35 | response = generate_latest(self.registry) | |
36 | request.setHeader(b"Content-Length", str(len(response))) | |
37 | return response |
53 | 53 | |
54 | 54 | async def setup(self) -> None: |
55 | 55 | """Keep the gauges for common usage metrics up to date.""" |
56 | await self._update_gauges() | |
56 | run_as_background_process( | |
57 | desc="common_usage_metrics_update_gauges", func=self._update_gauges | |
58 | ) | |
57 | 59 | self._clock.looping_call( |
58 | 60 | run_as_background_process, |
59 | 61 | 5 * 60 * 1000, |
85 | 85 | ON_LOGGED_OUT_CALLBACK, |
86 | 86 | AuthHandler, |
87 | 87 | ) |
88 | from synapse.handlers.device import DeviceHandler | |
88 | 89 | from synapse.handlers.push_rules import RuleSpec, check_actions |
89 | 90 | from synapse.http.client import SimpleHttpClient |
90 | 91 | from synapse.http.server import ( |
206 | 207 | self._registration_handler = hs.get_registration_handler() |
207 | 208 | self._send_email_handler = hs.get_send_email_handler() |
208 | 209 | self._push_rules_handler = hs.get_push_rules_handler() |
210 | self._device_handler = hs.get_device_handler() | |
209 | 211 | self.custom_template_dir = hs.config.server.custom_template_directory |
210 | 212 | |
211 | 213 | try: |
783 | 785 | ) -> Generator["defer.Deferred[Any]", Any, None]: |
784 | 786 | """Invalidate an access token for a user |
785 | 787 | |
788 | Can only be called from the main process. | |
789 | ||
786 | 790 | Added in Synapse v0.25.0. |
787 | 791 | |
788 | 792 | Args: |
789 | access_token(str): access token | |
793 | access_token: access token | |
790 | 794 | |
791 | 795 | Returns: |
792 | 796 | twisted.internet.defer.Deferred - resolves once the access token |
795 | 799 | Raises: |
796 | 800 | synapse.api.errors.AuthError: the access token is invalid |
797 | 801 | """ |
802 | assert isinstance( | |
803 | self._device_handler, DeviceHandler | |
804 | ), "invalidate_access_token can only be called on the main process" | |
805 | ||
798 | 806 | # see if the access token corresponds to a device |
799 | 807 | user_info = yield defer.ensureDeferred( |
800 | 808 | self._auth.get_user_by_access_token(access_token) |
804 | 812 | if device_id: |
805 | 813 | # delete the device, which will also delete its access tokens |
806 | 814 | yield defer.ensureDeferred( |
807 | self._hs.get_device_handler().delete_devices(user_id, [device_id]) | |
815 | self._device_handler.delete_devices(user_id, [device_id]) | |
808 | 816 | ) |
809 | 817 | else: |
810 | 818 | # no associated device. Just delete the access token. |
831 | 839 | **kwargs: named args to be passed to func |
832 | 840 | |
833 | 841 | Returns: |
834 | Deferred[object]: result of func | |
842 | Result of func | |
835 | 843 | """ |
836 | 844 | # type-ignore: See https://github.com/python/mypy/issues/8862 |
837 | 845 | return defer.ensureDeferred( |
923 | 931 | to represent 'any') of the room state to acquire. |
924 | 932 | |
925 | 933 | Returns: |
926 | twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]: | |
927 | The filtered state events in the room. | |
934 | The filtered state events in the room. | |
928 | 935 | """ |
929 | 936 | state_ids = yield defer.ensureDeferred( |
930 | 937 | self._storage_controllers.state.get_current_state_ids( |
28 | 28 | from prometheus_client import Counter |
29 | 29 | |
30 | 30 | from synapse.api.constants import MAIN_TIMELINE, EventTypes, Membership, RelationTypes |
31 | from synapse.api.room_versions import PushRuleRoomFlag, RoomVersion | |
31 | 32 | from synapse.event_auth import auth_types_for_event, get_user_power_level |
32 | 33 | from synapse.events import EventBase, relation_from_event |
33 | 34 | from synapse.events.snapshot import EventContext |
337 | 338 | for user_id, level in notification_levels.items(): |
338 | 339 | notification_levels[user_id] = int(level) |
339 | 340 | |
341 | room_version_features = event.room_version.msc3931_push_features | |
342 | if not room_version_features: | |
343 | room_version_features = [] | |
344 | ||
340 | 345 | evaluator = PushRuleEvaluator( |
341 | _flatten_dict(event), | |
346 | _flatten_dict(event, room_version=event.room_version), | |
342 | 347 | room_member_count, |
343 | 348 | sender_power_level, |
344 | 349 | notification_levels, |
345 | 350 | related_events, |
346 | 351 | self._related_event_match_enabled, |
352 | room_version_features, | |
353 | self.hs.config.experimental.msc1767_enabled, # MSC3931 flag | |
347 | 354 | ) |
348 | 355 | |
349 | 356 | users = rules_by_user.keys() |
419 | 426 | |
420 | 427 | def _flatten_dict( |
421 | 428 | d: Union[EventBase, Mapping[str, Any]], |
429 | room_version: Optional[RoomVersion] = None, | |
422 | 430 | prefix: Optional[List[str]] = None, |
423 | 431 | result: Optional[Dict[str, str]] = None, |
424 | 432 | ) -> Dict[str, str]: |
430 | 438 | if isinstance(value, str): |
431 | 439 | result[".".join(prefix + [key])] = value.lower() |
432 | 440 | elif isinstance(value, Mapping): |
441 | # do not set `room_version` due to recursion considerations below | |
433 | 442 | _flatten_dict(value, prefix=(prefix + [key]), result=result) |
434 | 443 | |
444 | # `room_version` should only ever be set when looking at the top level of an event | |
445 | if ( | |
446 | room_version is not None | |
447 | and PushRuleRoomFlag.EXTENSIBLE_EVENTS in room_version.msc3931_push_features | |
448 | and isinstance(d, EventBase) | |
449 | ): | |
450 | # Room supports extensible events: replace `content.body` with the plain text | |
451 | # representation from `m.markup`, as per MSC1767. | |
452 | markup = d.get("content").get("m.markup") | |
453 | if room_version.identifier.startswith("org.matrix.msc1767."): | |
454 | markup = d.get("content").get("org.matrix.msc1767.markup") | |
455 | if markup is not None and isinstance(markup, list): | |
456 | text = "" | |
457 | for rep in markup: | |
458 | if not isinstance(rep, dict): | |
459 | # invalid markup - skip all processing | |
460 | break | |
461 | if rep.get("mimetype", "text/plain") == "text/plain": | |
462 | rep_text = rep.get("body") | |
463 | if rep_text is not None and isinstance(rep_text, str): | |
464 | text = rep_text.lower() | |
465 | break | |
466 | result["content.body"] = text | |
467 | ||
435 | 468 | return result |
152 | 152 | argument list. |
153 | 153 | |
154 | 154 | Returns: |
155 | dict: If POST/PUT request then dictionary must be JSON serialisable, | |
155 | If POST/PUT request then dictionary must be JSON serialisable, | |
156 | 156 | otherwise must be appropriate for adding as query args. |
157 | 157 | """ |
158 | 158 | return {} |
12 | 12 | # limitations under the License. |
13 | 13 | |
14 | 14 | import logging |
15 | from typing import TYPE_CHECKING, Tuple | |
15 | from typing import TYPE_CHECKING, Optional, Tuple | |
16 | 16 | |
17 | 17 | from twisted.web.server import Request |
18 | 18 | |
19 | 19 | from synapse.http.server import HttpServer |
20 | from synapse.http.servlet import parse_json_object_from_request | |
20 | 21 | from synapse.replication.http._base import ReplicationEndpoint |
21 | 22 | from synapse.types import JsonDict |
22 | 23 | |
61 | 62 | def __init__(self, hs: "HomeServer"): |
62 | 63 | super().__init__(hs) |
63 | 64 | |
64 | self.device_list_updater = hs.get_device_handler().device_list_updater | |
65 | from synapse.handlers.device import DeviceHandler | |
66 | ||
67 | handler = hs.get_device_handler() | |
68 | assert isinstance(handler, DeviceHandler) | |
69 | self.device_list_updater = handler.device_list_updater | |
70 | ||
65 | 71 | self.store = hs.get_datastores().main |
66 | 72 | self.clock = hs.get_clock() |
67 | 73 | |
71 | 77 | |
72 | 78 | async def _handle_request( # type: ignore[override] |
73 | 79 | self, request: Request, user_id: str |
74 | ) -> Tuple[int, JsonDict]: | |
80 | ) -> Tuple[int, Optional[JsonDict]]: | |
75 | 81 | user_devices = await self.device_list_updater.user_device_resync(user_id) |
76 | 82 | |
77 | 83 | return 200, user_devices |
78 | 84 | |
79 | 85 | |
86 | class ReplicationUploadKeysForUserRestServlet(ReplicationEndpoint): | |
87 | """Ask master to upload keys for the user and send them out over federation to | |
88 | update other servers. | |
89 | ||
90 | For now, only the master is permitted to handle key upload requests; | |
91 | any worker can handle key query requests (since they're read-only). | |
92 | ||
93 | Calls to e2e_keys_handler.upload_keys_for_user(user_id, device_id, keys) on | |
94 | the main process to accomplish this. | |
95 | ||
96 | Defined in https://spec.matrix.org/v1.4/client-server-api/#post_matrixclientv3keysupload | |
97 | Request format(borrowed and expanded from KeyUploadServlet): | |
98 | ||
99 | POST /_synapse/replication/upload_keys_for_user | |
100 | ||
101 | { | |
102 | "user_id": "<user_id>", | |
103 | "device_id": "<device_id>", | |
104 | "keys": { | |
105 | ....this part can be found in KeyUploadServlet in rest/client/keys.py.... | |
106 | } | |
107 | } | |
108 | ||
109 | Response is equivalent to ` /_matrix/client/v3/keys/upload` found in KeyUploadServlet | |
110 | ||
111 | """ | |
112 | ||
113 | NAME = "upload_keys_for_user" | |
114 | PATH_ARGS = () | |
115 | CACHE = False | |
116 | ||
117 | def __init__(self, hs: "HomeServer"): | |
118 | super().__init__(hs) | |
119 | ||
120 | self.e2e_keys_handler = hs.get_e2e_keys_handler() | |
121 | self.store = hs.get_datastores().main | |
122 | self.clock = hs.get_clock() | |
123 | ||
124 | @staticmethod | |
125 | async def _serialize_payload( # type: ignore[override] | |
126 | user_id: str, device_id: str, keys: JsonDict | |
127 | ) -> JsonDict: | |
128 | ||
129 | return { | |
130 | "user_id": user_id, | |
131 | "device_id": device_id, | |
132 | "keys": keys, | |
133 | } | |
134 | ||
135 | async def _handle_request( # type: ignore[override] | |
136 | self, request: Request | |
137 | ) -> Tuple[int, JsonDict]: | |
138 | content = parse_json_object_from_request(request) | |
139 | ||
140 | user_id = content["user_id"] | |
141 | device_id = content["device_id"] | |
142 | keys = content["keys"] | |
143 | ||
144 | results = await self.e2e_keys_handler.upload_keys_for_user( | |
145 | user_id, device_id, keys | |
146 | ) | |
147 | ||
148 | return 200, results | |
149 | ||
150 | ||
80 | 151 | def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: |
81 | 152 | ReplicationUserDevicesResyncRestServlet(hs).register(http_server) |
153 | ReplicationUploadKeysForUserRestServlet(hs).register(http_server) |
0 | # Copyright 2016 OpenMarket Ltd | |
1 | # | |
2 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | # you may not use this file except in compliance with the License. | |
4 | # You may obtain a copy of the License at | |
5 | # | |
6 | # http://www.apache.org/licenses/LICENSE-2.0 | |
7 | # | |
8 | # Unless required by applicable law or agreed to in writing, software | |
9 | # distributed under the License is distributed on an "AS IS" BASIS, | |
10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | # See the License for the specific language governing permissions and | |
12 | # limitations under the License. |
0 | # Copyright 2016 OpenMarket Ltd | |
1 | # | |
2 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | # you may not use this file except in compliance with the License. | |
4 | # You may obtain a copy of the License at | |
5 | # | |
6 | # http://www.apache.org/licenses/LICENSE-2.0 | |
7 | # | |
8 | # Unless required by applicable law or agreed to in writing, software | |
9 | # distributed under the License is distributed on an "AS IS" BASIS, | |
10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | # See the License for the specific language governing permissions and | |
12 | # limitations under the License. |
0 | # Copyright 2016 OpenMarket Ltd | |
1 | # | |
2 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | # you may not use this file except in compliance with the License. | |
4 | # You may obtain a copy of the License at | |
5 | # | |
6 | # http://www.apache.org/licenses/LICENSE-2.0 | |
7 | # | |
8 | # Unless required by applicable law or agreed to in writing, software | |
9 | # distributed under the License is distributed on an "AS IS" BASIS, | |
10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | # See the License for the specific language governing permissions and | |
12 | # limitations under the License. | |
13 | from typing import List, Optional, Tuple | |
14 | ||
15 | from synapse.storage.database import LoggingDatabaseConnection | |
16 | from synapse.storage.util.id_generators import AbstractStreamIdTracker, _load_current_id | |
17 | ||
18 | ||
19 | class SlavedIdTracker(AbstractStreamIdTracker): | |
20 | """Tracks the "current" stream ID of a stream with a single writer. | |
21 | ||
22 | See `AbstractStreamIdTracker` for more details. | |
23 | ||
24 | Note that this class does not work correctly when there are multiple | |
25 | writers. | |
26 | """ | |
27 | ||
28 | def __init__( | |
29 | self, | |
30 | db_conn: LoggingDatabaseConnection, | |
31 | table: str, | |
32 | column: str, | |
33 | extra_tables: Optional[List[Tuple[str, str]]] = None, | |
34 | step: int = 1, | |
35 | ): | |
36 | self.step = step | |
37 | self._current = _load_current_id(db_conn, table, column, step) | |
38 | if extra_tables: | |
39 | for table, column in extra_tables: | |
40 | self.advance(None, _load_current_id(db_conn, table, column)) | |
41 | ||
42 | def advance(self, instance_name: Optional[str], new_id: int) -> None: | |
43 | self._current = (max if self.step > 0 else min)(self._current, new_id) | |
44 | ||
45 | def get_current_token(self) -> int: | |
46 | return self._current | |
47 | ||
48 | def get_current_token_for_writer(self, instance_name: str) -> int: | |
49 | return self.get_current_token() |
244 | 244 | self._parse_and_dispatch_line(line) |
245 | 245 | |
246 | 246 | def _parse_and_dispatch_line(self, line: bytes) -> None: |
247 | if line.strip() == "": | |
247 | if line.strip() == b"": | |
248 | 248 | # Ignore blank lines |
249 | 249 | return |
250 | 250 |
237 | 237 | """ |
238 | 238 | Register all the admin servlets. |
239 | 239 | """ |
240 | # Admin servlets aren't registered on workers. | |
241 | if hs.config.worker.worker_app is not None: | |
242 | return | |
243 | ||
240 | 244 | register_servlets_for_client_rest_resource(hs, http_server) |
241 | 245 | BlockRoomRestServlet(hs).register(http_server) |
242 | 246 | ListRoomRestServlet(hs).register(http_server) |
253 | 257 | UserTokenRestServlet(hs).register(http_server) |
254 | 258 | UserRestServletV2(hs).register(http_server) |
255 | 259 | UsersRestServletV2(hs).register(http_server) |
256 | DeviceRestServlet(hs).register(http_server) | |
257 | DevicesRestServlet(hs).register(http_server) | |
258 | DeleteDevicesRestServlet(hs).register(http_server) | |
259 | 260 | UserMediaStatisticsRestServlet(hs).register(http_server) |
260 | 261 | EventReportDetailRestServlet(hs).register(http_server) |
261 | 262 | EventReportsRestServlet(hs).register(http_server) |
279 | 280 | UserByExternalId(hs).register(http_server) |
280 | 281 | UserByThreePid(hs).register(http_server) |
281 | 282 | |
282 | # Some servlets only get registered for the main process. | |
283 | if hs.config.worker.worker_app is None: | |
284 | SendServerNoticeServlet(hs).register(http_server) | |
285 | BackgroundUpdateEnabledRestServlet(hs).register(http_server) | |
286 | BackgroundUpdateRestServlet(hs).register(http_server) | |
287 | BackgroundUpdateStartJobRestServlet(hs).register(http_server) | |
283 | DeviceRestServlet(hs).register(http_server) | |
284 | DevicesRestServlet(hs).register(http_server) | |
285 | DeleteDevicesRestServlet(hs).register(http_server) | |
286 | SendServerNoticeServlet(hs).register(http_server) | |
287 | BackgroundUpdateEnabledRestServlet(hs).register(http_server) | |
288 | BackgroundUpdateRestServlet(hs).register(http_server) | |
289 | BackgroundUpdateStartJobRestServlet(hs).register(http_server) | |
288 | 290 | |
289 | 291 | |
290 | 292 | def register_servlets_for_client_rest_resource( |
293 | 295 | """Register only the servlets which need to be exposed on /_matrix/client/xxx""" |
294 | 296 | WhoisRestServlet(hs).register(http_server) |
295 | 297 | PurgeHistoryStatusRestServlet(hs).register(http_server) |
296 | DeactivateAccountRestServlet(hs).register(http_server) | |
297 | 298 | PurgeHistoryRestServlet(hs).register(http_server) |
298 | ResetPasswordRestServlet(hs).register(http_server) | |
299 | # The following resources can only be run on the main process. | |
300 | if hs.config.worker.worker_app is None: | |
301 | DeactivateAccountRestServlet(hs).register(http_server) | |
302 | ResetPasswordRestServlet(hs).register(http_server) | |
299 | 303 | SearchUsersRestServlet(hs).register(http_server) |
300 | 304 | UserRegisterServlet(hs).register(http_server) |
301 | 305 | AccountValidityRenewServlet(hs).register(http_server) |
15 | 15 | from typing import TYPE_CHECKING, Tuple |
16 | 16 | |
17 | 17 | from synapse.api.errors import NotFoundError, SynapseError |
18 | from synapse.handlers.device import DeviceHandler | |
18 | 19 | from synapse.http.servlet import ( |
19 | 20 | RestServlet, |
20 | 21 | assert_params_in_dict, |
42 | 43 | def __init__(self, hs: "HomeServer"): |
43 | 44 | super().__init__() |
44 | 45 | self.auth = hs.get_auth() |
45 | self.device_handler = hs.get_device_handler() | |
46 | handler = hs.get_device_handler() | |
47 | assert isinstance(handler, DeviceHandler) | |
48 | self.device_handler = handler | |
46 | 49 | self.store = hs.get_datastores().main |
47 | 50 | self.is_mine = hs.is_mine |
48 | 51 | |
111 | 114 | |
112 | 115 | def __init__(self, hs: "HomeServer"): |
113 | 116 | self.auth = hs.get_auth() |
114 | self.device_handler = hs.get_device_handler() | |
117 | handler = hs.get_device_handler() | |
118 | assert isinstance(handler, DeviceHandler) | |
119 | self.device_handler = handler | |
115 | 120 | self.store = hs.get_datastores().main |
116 | 121 | self.is_mine = hs.is_mine |
117 | 122 | |
142 | 147 | |
143 | 148 | def __init__(self, hs: "HomeServer"): |
144 | 149 | self.auth = hs.get_auth() |
145 | self.device_handler = hs.get_device_handler() | |
150 | handler = hs.get_device_handler() | |
151 | assert isinstance(handler, DeviceHandler) | |
152 | self.device_handler = handler | |
146 | 153 | self.store = hs.get_datastores().main |
147 | 154 | self.is_mine = hs.is_mine |
148 | 155 |
902 | 902 | @user:server/pushers |
903 | 903 | |
904 | 904 | Returns: |
905 | pushers: Dictionary containing pushers information. | |
906 | total: Number of pushers in dictionary `pushers`. | |
905 | A dictionary with keys: | |
906 | pushers: Dictionary containing pushers information. | |
907 | total: Number of pushers in dictionary `pushers`. | |
907 | 908 | """ |
908 | 909 | |
909 | 910 | PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$") |
19 | 19 | |
20 | 20 | from synapse.api import errors |
21 | 21 | from synapse.api.errors import NotFoundError |
22 | from synapse.handlers.device import DeviceHandler | |
22 | 23 | from synapse.http.server import HttpServer |
23 | 24 | from synapse.http.servlet import ( |
24 | 25 | RestServlet, |
79 | 80 | super().__init__() |
80 | 81 | self.hs = hs |
81 | 82 | self.auth = hs.get_auth() |
82 | self.device_handler = hs.get_device_handler() | |
83 | handler = hs.get_device_handler() | |
84 | assert isinstance(handler, DeviceHandler) | |
85 | self.device_handler = handler | |
83 | 86 | self.auth_handler = hs.get_auth_handler() |
84 | 87 | |
85 | 88 | class PostBody(RequestBodyModel): |
124 | 127 | super().__init__() |
125 | 128 | self.hs = hs |
126 | 129 | self.auth = hs.get_auth() |
127 | self.device_handler = hs.get_device_handler() | |
130 | handler = hs.get_device_handler() | |
131 | assert isinstance(handler, DeviceHandler) | |
132 | self.device_handler = handler | |
128 | 133 | self.auth_handler = hs.get_auth_handler() |
129 | 134 | self._msc3852_enabled = hs.config.experimental.msc3852_enabled |
130 | 135 | |
255 | 260 | super().__init__() |
256 | 261 | self.hs = hs |
257 | 262 | self.auth = hs.get_auth() |
258 | self.device_handler = hs.get_device_handler() | |
263 | handler = hs.get_device_handler() | |
264 | assert isinstance(handler, DeviceHandler) | |
265 | self.device_handler = handler | |
259 | 266 | |
260 | 267 | async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: |
261 | 268 | requester = await self.auth.get_user_by_req(request) |
312 | 319 | super().__init__() |
313 | 320 | self.hs = hs |
314 | 321 | self.auth = hs.get_auth() |
315 | self.device_handler = hs.get_device_handler() | |
322 | handler = hs.get_device_handler() | |
323 | assert isinstance(handler, DeviceHandler) | |
324 | self.device_handler = handler | |
316 | 325 | |
317 | 326 | class PostBody(RequestBodyModel): |
318 | 327 | device_id: StrictStr |
26 | 26 | ) |
27 | 27 | from synapse.http.site import SynapseRequest |
28 | 28 | from synapse.logging.opentracing import log_kv, set_tag |
29 | from synapse.replication.http.devices import ReplicationUploadKeysForUserRestServlet | |
29 | 30 | from synapse.rest.client._base import client_patterns, interactive_auth_handler |
30 | 31 | from synapse.types import JsonDict, StreamToken |
31 | 32 | from synapse.util.cancellation import cancellable |
42 | 43 | Content-Type: application/json |
43 | 44 | |
44 | 45 | { |
45 | "device_keys": { | |
46 | "user_id": "<user_id>", | |
47 | "device_id": "<device_id>", | |
48 | "valid_until_ts": <millisecond_timestamp>, | |
49 | "algorithms": [ | |
50 | "m.olm.curve25519-aes-sha2", | |
51 | ] | |
52 | "keys": { | |
53 | "<algorithm>:<device_id>": "<key_base64>", | |
46 | "device_keys": { | |
47 | "user_id": "<user_id>", | |
48 | "device_id": "<device_id>", | |
49 | "valid_until_ts": <millisecond_timestamp>, | |
50 | "algorithms": [ | |
51 | "m.olm.curve25519-aes-sha2", | |
52 | ] | |
53 | "keys": { | |
54 | "<algorithm>:<device_id>": "<key_base64>", | |
55 | }, | |
56 | "signatures:" { | |
57 | "<user_id>" { | |
58 | "<algorithm>:<device_id>": "<signature_base64>" | |
59 | } | |
60 | } | |
54 | 61 | }, |
55 | "signatures:" { | |
56 | "<user_id>" { | |
57 | "<algorithm>:<device_id>": "<signature_base64>" | |
58 | } } }, | |
59 | "one_time_keys": { | |
60 | "<algorithm>:<key_id>": "<key_base64>" | |
61 | }, | |
62 | "fallback_keys": { | |
63 | "<algorithm>:<device_id>": "<key_base64>", | |
64 | "signed_<algorithm>:<device_id>": { | |
65 | "fallback": true, | |
66 | "key": "<key_base64>", | |
67 | "signatures": { | |
68 | "<user_id>": { | |
69 | "<algorithm>:<device_id>": "<key_base64>" | |
70 | } | |
71 | } | |
72 | } | |
73 | } | |
74 | "one_time_keys": { | |
75 | "<algorithm>:<key_id>": "<key_base64>" | |
76 | }, | |
62 | 77 | } |
78 | ||
79 | response, e.g.: | |
80 | ||
81 | { | |
82 | "one_time_key_counts": { | |
83 | "curve25519": 10, | |
84 | "signed_curve25519": 20 | |
85 | } | |
86 | } | |
87 | ||
63 | 88 | """ |
64 | 89 | |
65 | 90 | PATTERNS = client_patterns("/keys/upload(/(?P<device_id>[^/]+))?$") |
69 | 94 | self.auth = hs.get_auth() |
70 | 95 | self.e2e_keys_handler = hs.get_e2e_keys_handler() |
71 | 96 | self.device_handler = hs.get_device_handler() |
97 | ||
98 | if hs.config.worker.worker_app is None: | |
99 | # if main process | |
100 | self.key_uploader = self.e2e_keys_handler.upload_keys_for_user | |
101 | else: | |
102 | # then a worker | |
103 | self.key_uploader = ReplicationUploadKeysForUserRestServlet.make_client(hs) | |
72 | 104 | |
73 | 105 | async def on_POST( |
74 | 106 | self, request: SynapseRequest, device_id: Optional[str] |
108 | 140 | 400, "To upload keys, you must pass device_id when authenticating" |
109 | 141 | ) |
110 | 142 | |
111 | result = await self.e2e_keys_handler.upload_keys_for_user( | |
112 | user_id, device_id, body | |
143 | result = await self.key_uploader( | |
144 | user_id=user_id, device_id=device_id, keys=body | |
113 | 145 | ) |
114 | 146 | return 200, result |
115 | 147 |
349 | 349 | auth_provider_session_id: The session ID got during login from the SSO IdP. |
350 | 350 | |
351 | 351 | Returns: |
352 | result: Dictionary of account information after successful login. | |
352 | Dictionary of account information after successful login. | |
353 | 353 | """ |
354 | 354 | |
355 | 355 | # Before we actually log them in we check if they've already logged in |
14 | 14 | import logging |
15 | 15 | from typing import TYPE_CHECKING, Tuple |
16 | 16 | |
17 | from synapse.handlers.device import DeviceHandler | |
17 | 18 | from synapse.http.server import HttpServer |
18 | 19 | from synapse.http.servlet import RestServlet |
19 | 20 | from synapse.http.site import SynapseRequest |
33 | 34 | super().__init__() |
34 | 35 | self.auth = hs.get_auth() |
35 | 36 | self._auth_handler = hs.get_auth_handler() |
36 | self._device_handler = hs.get_device_handler() | |
37 | handler = hs.get_device_handler() | |
38 | assert isinstance(handler, DeviceHandler) | |
39 | self._device_handler = handler | |
37 | 40 | |
38 | 41 | async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: |
39 | 42 | requester = await self.auth.get_user_by_req(request, allow_expired=True) |
58 | 61 | super().__init__() |
59 | 62 | self.auth = hs.get_auth() |
60 | 63 | self._auth_handler = hs.get_auth_handler() |
61 | self._device_handler = hs.get_device_handler() | |
64 | handler = hs.get_device_handler() | |
65 | assert isinstance(handler, DeviceHandler) | |
66 | self._device_handler = handler | |
62 | 67 | |
63 | 68 | async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: |
64 | 69 | requester = await self.auth.get_user_by_req(request, allow_expired=True) |
1283 | 1283 | `dir` can be `f` or `b` to indicate forwards and backwards in time from the |
1284 | 1284 | given timestamp. |
1285 | 1285 | |
1286 | GET /_matrix/client/unstable/org.matrix.msc3030/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction> | |
1286 | GET /_matrix/client/v1/rooms/<roomID>/timestamp_to_event?ts=<timestamp>&dir=<direction> | |
1287 | 1287 | { |
1288 | 1288 | "event_id": ... |
1289 | 1289 | } |
1290 | 1290 | """ |
1291 | 1291 | |
1292 | 1292 | PATTERNS = ( |
1293 | re.compile( | |
1294 | "^/_matrix/client/unstable/org.matrix.msc3030" | |
1295 | "/rooms/(?P<room_id>[^/]*)/timestamp_to_event$" | |
1296 | ), | |
1293 | re.compile("^/_matrix/client/v1/rooms/(?P<room_id>[^/]*)/timestamp_to_event$"), | |
1297 | 1294 | ) |
1298 | 1295 | |
1299 | 1296 | def __init__(self, hs: "HomeServer"): |
1420 | 1417 | RoomAliasListServlet(hs).register(http_server) |
1421 | 1418 | SearchRestServlet(hs).register(http_server) |
1422 | 1419 | RoomCreateRestServlet(hs).register(http_server) |
1423 | if hs.config.experimental.msc3030_enabled: | |
1424 | TimestampLookupRestServlet(hs).register(http_server) | |
1420 | TimestampLookupRestServlet(hs).register(http_server) | |
1425 | 1421 | |
1426 | 1422 | # Some servlets only get registered for the main process. |
1427 | 1423 | if not is_worker: |
100 | 100 | "org.matrix.msc3827.stable": True, |
101 | 101 | # Adds support for importing historical messages as per MSC2716 |
102 | 102 | "org.matrix.msc2716": self.config.experimental.msc2716_enabled, |
103 | # Adds support for jump to date endpoints (/timestamp_to_event) as per MSC3030 | |
104 | "org.matrix.msc3030": self.config.experimental.msc3030_enabled, | |
105 | 103 | # Adds support for thread relations, per MSC3440. |
106 | 104 | "org.matrix.msc3440.stable": True, # TODO: remove when "v1.3" is added above |
107 | 105 | # Support for thread read receipts & notification counts. |
343 | 343 | download from remote server. |
344 | 344 | |
345 | 345 | Args: |
346 | server_name (str): Remote server_name where the media originated. | |
347 | media_id (str): The media ID of the content (as defined by the | |
346 | server_name: Remote server_name where the media originated. | |
347 | media_id: The media ID of the content (as defined by the | |
348 | 348 | remote server). |
349 | 349 | |
350 | 350 | Returns: |
137 | 137 | """Rescales the image to the given dimensions. |
138 | 138 | |
139 | 139 | Returns: |
140 | BytesIO: the bytes of the encoded image ready to be written to disk | |
140 | The bytes of the encoded image ready to be written to disk | |
141 | 141 | """ |
142 | 142 | with self._resize(width, height) as scaled: |
143 | 143 | return self._encode_image(scaled, output_type) |
154 | 154 | max_height: The largest possible height. |
155 | 155 | |
156 | 156 | Returns: |
157 | BytesIO: the bytes of the encoded image ready to be written to disk | |
157 | The bytes of the encoded image ready to be written to disk | |
158 | 158 | """ |
159 | 159 | if width * self.height > height * self.width: |
160 | 160 | scaled_width = width |
509 | 509 | ) |
510 | 510 | |
511 | 511 | @cache_in_self |
512 | def get_device_handler(self): | |
512 | def get_device_handler(self) -> DeviceWorkerHandler: | |
513 | 513 | if self.config.worker.worker_app: |
514 | 514 | return DeviceWorkerHandler(self) |
515 | 515 | else: |
112 | 112 | """Deep-copy a structure, carrying out string substitutions on any strings |
113 | 113 | |
114 | 114 | Args: |
115 | x (object): structure to be copied | |
116 | substitutions (object): substitutions to be made - passed into the | |
117 | string '%' operator | |
115 | x: structure to be copied | |
116 | substitutions: substitutions to be made - passed into the string '%' operator | |
118 | 117 | |
119 | 118 | Returns: |
120 | 119 | copy of x |
169 | 169 | room_id: The room id of the server notices room |
170 | 170 | |
171 | 171 | Returns: |
172 | bool: Is the room currently blocked | |
173 | list: The list of pinned event IDs that are unrelated to limit blocking | |
174 | This list can be used as a convenience in the case where the block | |
175 | is to be lifted and the remaining pinned event references need to be | |
176 | preserved | |
172 | Tuple of: | |
173 | Is the room currently blocked | |
174 | ||
175 | The list of pinned event IDs that are unrelated to limit blocking | |
176 | This list can be used as a convenience in the case where the block | |
177 | is to be lifted and the remaining pinned event references need to be | |
178 | preserved | |
177 | 179 | """ |
178 | 180 | currently_blocked = False |
179 | 181 | pinned_state_event = None |
189 | 189 | room_id: str, |
190 | 190 | event_ids: Collection[str], |
191 | 191 | state_filter: Optional[StateFilter] = None, |
192 | await_full_state: bool = True, | |
192 | 193 | ) -> StateMap[str]: |
193 | 194 | """Fetch the state after each of the given event IDs. Resolve them and return. |
194 | 195 | |
199 | 200 | Args: |
200 | 201 | room_id: the room_id containing the given events. |
201 | 202 | event_ids: the events whose state should be fetched and resolved. |
203 | await_full_state: if `True`, will block if we do not yet have complete state | |
204 | at the given `event_id`s, regardless of whether `state_filter` is | |
205 | satisfied by partial state. | |
202 | 206 | |
203 | 207 | Returns: |
204 | 208 | the state dict (a mapping from (event_type, state_key) -> event_id) which |
205 | 209 | holds the resolution of the states after the given event IDs. |
206 | 210 | """ |
207 | 211 | logger.debug("calling resolve_state_groups from compute_state_after_events") |
208 | ret = await self.resolve_state_groups_for_events(room_id, event_ids) | |
212 | ret = await self.resolve_state_groups_for_events( | |
213 | room_id, event_ids, await_full_state | |
214 | ) | |
209 | 215 | return await ret.get_state(self._state_storage_controller, state_filter) |
210 | 216 | |
211 | 217 | async def get_current_user_ids_in_room( |
203 | 203 | process to to so, calling the per_item_callback for each item. |
204 | 204 | |
205 | 205 | Args: |
206 | room_id (str): | |
207 | task (_EventPersistQueueTask): A _PersistEventsTask or | |
208 | _UpdateCurrentStateTask to process. | |
206 | room_id: | |
207 | task: A _PersistEventsTask or _UpdateCurrentStateTask to process. | |
209 | 208 | |
210 | 209 | Returns: |
211 | 210 | the result returned by the `_per_item_callback` passed to |
568 | 568 | retcols=["update_name"], |
569 | 569 | desc="check_background_updates", |
570 | 570 | ) |
571 | updates = [x["update_name"] for x in updates] | |
571 | background_update_names = [x["update_name"] for x in updates] | |
572 | 572 | |
573 | 573 | for table, update_name in UNIQUE_INDEX_BACKGROUND_UPDATES.items(): |
574 | if update_name not in updates: | |
574 | if update_name not in background_update_names: | |
575 | 575 | logger.debug("Now safe to upsert in %s", table) |
576 | 576 | self._unsafe_to_upsert_tables.discard(table) |
577 | 577 | |
578 | 578 | # If there's any updates still running, reschedule to run. |
579 | if updates: | |
579 | if background_update_names: | |
580 | 580 | self._clock.call_later( |
581 | 581 | 15.0, |
582 | 582 | run_as_background_process, |
1128 | 1128 | values: Dict[str, Any], |
1129 | 1129 | insertion_values: Optional[Dict[str, Any]] = None, |
1130 | 1130 | desc: str = "simple_upsert", |
1131 | lock: bool = True, | |
1132 | 1131 | ) -> bool: |
1133 | 1132 | """Insert a row with values + insertion_values; on conflict, update with values. |
1134 | 1133 | |
1153 | 1152 | requiring that a unique index exist on the column names used to detect a |
1154 | 1153 | conflict (i.e. `keyvalues.keys()`). |
1155 | 1154 | |
1156 | If there is no such index, we can "emulate" an upsert with a SELECT followed | |
1157 | by either an INSERT or an UPDATE. This is unsafe: we cannot make the same | |
1158 | atomicity guarantees that a native upsert can and are very vulnerable to races | |
1159 | and crashes. Therefore if we wish to upsert without an appropriate unique index, | |
1160 | we must either: | |
1161 | ||
1162 | 1. Acquire a table-level lock before the emulated upsert (`lock=True`), or | |
1163 | 2. VERY CAREFULLY ensure that we are the only thread and worker which will be | |
1164 | writing to this table, in which case we can proceed without a lock | |
1165 | (`lock=False`). | |
1166 | ||
1167 | Generally speaking, you should use `lock=True`. If the table in question has a | |
1168 | unique index[*], this class will use a native upsert (which is atomic and so can | |
1169 | ignore the `lock` argument). Otherwise this class will use an emulated upsert, | |
1170 | in which case we want the safer option unless we been VERY CAREFUL. | |
1155 | If there is no such index yet[*], we can "emulate" an upsert with a SELECT | |
1156 | followed by either an INSERT or an UPDATE. This is unsafe unless *all* upserters | |
1157 | run at the SERIALIZABLE isolation level: we cannot make the same atomicity | |
1158 | guarantees that a native upsert can and are very vulnerable to races and | |
1159 | crashes. Therefore to upsert without an appropriate unique index, we acquire a | |
1160 | table-level lock before the emulated upsert. | |
1171 | 1161 | |
1172 | 1162 | [*]: Some tables have unique indices added to them in the background. Those |
1173 | 1163 | tables `T` are keys in the dictionary UNIQUE_INDEX_BACKGROUND_UPDATES, |
1188 | 1178 | values: The nonunique columns and their new values |
1189 | 1179 | insertion_values: additional key/values to use only when inserting |
1190 | 1180 | desc: description of the transaction, for logging and metrics |
1191 | lock: True to lock the table when doing the upsert. | |
1192 | 1181 | Returns: |
1193 | 1182 | Returns True if a row was inserted or updated (i.e. if `values` is |
1194 | 1183 | not empty then this always returns True) |
1208 | 1197 | keyvalues, |
1209 | 1198 | values, |
1210 | 1199 | insertion_values, |
1211 | lock=lock, | |
1212 | 1200 | db_autocommit=autocommit, |
1213 | 1201 | ) |
1214 | 1202 | except self.engine.module.IntegrityError as e: |
1231 | 1219 | values: Dict[str, Any], |
1232 | 1220 | insertion_values: Optional[Dict[str, Any]] = None, |
1233 | 1221 | where_clause: Optional[str] = None, |
1234 | lock: bool = True, | |
1235 | 1222 | ) -> bool: |
1236 | 1223 | """ |
1237 | 1224 | Pick the UPSERT method which works best on the platform. Either the |
1244 | 1231 | values: The nonunique columns and their new values |
1245 | 1232 | insertion_values: additional key/values to use only when inserting |
1246 | 1233 | where_clause: An index predicate to apply to the upsert. |
1247 | lock: True to lock the table when doing the upsert. Unused when performing | |
1248 | a native upsert. | |
1249 | 1234 | Returns: |
1250 | 1235 | Returns True if a row was inserted or updated (i.e. if `values` is |
1251 | 1236 | not empty then this always returns True) |
1269 | 1254 | values, |
1270 | 1255 | insertion_values=insertion_values, |
1271 | 1256 | where_clause=where_clause, |
1272 | lock=lock, | |
1273 | 1257 | ) |
1274 | 1258 | |
1275 | 1259 | def simple_upsert_txn_emulated( |
1290 | 1274 | insertion_values: additional key/values to use only when inserting |
1291 | 1275 | where_clause: An index predicate to apply to the upsert. |
1292 | 1276 | lock: True to lock the table when doing the upsert. |
1277 | Must not be False unless the table has already been locked. | |
1293 | 1278 | Returns: |
1294 | 1279 | Returns True if a row was inserted or updated (i.e. if `values` is |
1295 | 1280 | not empty then this always returns True) |
1296 | 1281 | """ |
1297 | 1282 | insertion_values = insertion_values or {} |
1298 | 1283 | |
1299 | # We need to lock the table :(, unless we're *really* careful | |
1300 | 1284 | if lock: |
1285 | # We need to lock the table :( | |
1301 | 1286 | self.engine.lock_table(txn, table) |
1302 | 1287 | |
1303 | 1288 | def _getwhere(key: str) -> str: |
1405 | 1390 | value_names: Collection[str], |
1406 | 1391 | value_values: Collection[Collection[Any]], |
1407 | 1392 | desc: str, |
1408 | lock: bool = True, | |
1409 | 1393 | ) -> None: |
1410 | 1394 | """ |
1411 | 1395 | Upsert, many times. |
1417 | 1401 | value_names: The value column names |
1418 | 1402 | value_values: A list of each row's value column values. |
1419 | 1403 | Ignored if value_names is empty. |
1420 | lock: True to lock the table when doing the upsert. Unused when performing | |
1421 | a native upsert. | |
1422 | 1404 | """ |
1423 | 1405 | |
1424 | 1406 | # We can autocommit if it safe to upsert |
1432 | 1414 | key_values, |
1433 | 1415 | value_names, |
1434 | 1416 | value_values, |
1435 | lock=lock, | |
1436 | 1417 | db_autocommit=autocommit, |
1437 | 1418 | ) |
1438 | 1419 | |
1444 | 1425 | key_values: Collection[Iterable[Any]], |
1445 | 1426 | value_names: Collection[str], |
1446 | 1427 | value_values: Iterable[Iterable[Any]], |
1447 | lock: bool = True, | |
1448 | 1428 | ) -> None: |
1449 | 1429 | """ |
1450 | 1430 | Upsert, many times. |
1456 | 1436 | value_names: The value column names |
1457 | 1437 | value_values: A list of each row's value column values. |
1458 | 1438 | Ignored if value_names is empty. |
1459 | lock: True to lock the table when doing the upsert. Unused when performing | |
1460 | a native upsert. | |
1461 | 1439 | """ |
1462 | 1440 | if table not in self._unsafe_to_upsert_tables: |
1463 | 1441 | return self.simple_upsert_many_txn_native_upsert( |
1465 | 1443 | ) |
1466 | 1444 | else: |
1467 | 1445 | return self.simple_upsert_many_txn_emulated( |
1468 | txn, table, key_names, key_values, value_names, value_values, lock=lock | |
1446 | txn, | |
1447 | table, | |
1448 | key_names, | |
1449 | key_values, | |
1450 | value_names, | |
1451 | value_values, | |
1469 | 1452 | ) |
1470 | 1453 | |
1471 | 1454 | def simple_upsert_many_txn_emulated( |
1476 | 1459 | key_values: Collection[Iterable[Any]], |
1477 | 1460 | value_names: Collection[str], |
1478 | 1461 | value_values: Iterable[Iterable[Any]], |
1479 | lock: bool = True, | |
1480 | 1462 | ) -> None: |
1481 | 1463 | """ |
1482 | 1464 | Upsert, many times, but without native UPSERT support or batching. |
1488 | 1470 | value_names: The value column names |
1489 | 1471 | value_values: A list of each row's value column values. |
1490 | 1472 | Ignored if value_names is empty. |
1491 | lock: True to lock the table when doing the upsert. | |
1492 | 1473 | """ |
1493 | 1474 | # No value columns, therefore make a blank list so that the following |
1494 | 1475 | # zip() works correctly. |
1495 | 1476 | if not value_names: |
1496 | 1477 | value_values = [() for x in range(len(key_values))] |
1497 | 1478 | |
1498 | if lock: | |
1499 | # Lock the table just once, to prevent it being done once per row. | |
1500 | # Note that, according to Postgres' documentation, once obtained, | |
1501 | # the lock is held for the remainder of the current transaction. | |
1502 | self.engine.lock_table(txn, "user_ips") | |
1479 | # Lock the table just once, to prevent it being done once per row. | |
1480 | # Note that, according to Postgres' documentation, once obtained, | |
1481 | # the lock is held for the remainder of the current transaction. | |
1482 | self.engine.lock_table(txn, "user_ips") | |
1503 | 1483 | |
1504 | 1484 | for keyv, valv in zip(key_values, value_values): |
1505 | 1485 | _keys = {x: y for x, y in zip(key_names, keyv)} |
2074 | 2054 | retcols: Collection[str], |
2075 | 2055 | allow_none: bool = False, |
2076 | 2056 | ) -> Optional[Dict[str, Any]]: |
2077 | select_sql = "SELECT %s FROM %s WHERE %s" % ( | |
2078 | ", ".join(retcols), | |
2079 | table, | |
2080 | " AND ".join("%s = ?" % (k,) for k in keyvalues), | |
2081 | ) | |
2082 | ||
2083 | txn.execute(select_sql, list(keyvalues.values())) | |
2057 | select_sql = "SELECT %s FROM %s" % (", ".join(retcols), table) | |
2058 | ||
2059 | if keyvalues: | |
2060 | select_sql += " WHERE %s" % (" AND ".join("%s = ?" % k for k in keyvalues),) | |
2061 | txn.execute(select_sql, list(keyvalues.values())) | |
2062 | else: | |
2063 | txn.execute(select_sql) | |
2064 | ||
2084 | 2065 | row = txn.fetchone() |
2085 | 2066 | |
2086 | 2067 | if not row: |
26 | 26 | ) |
27 | 27 | |
28 | 28 | from synapse.api.constants import AccountDataTypes |
29 | from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker | |
30 | 29 | from synapse.replication.tcp.streams import AccountDataStream, TagAccountDataStream |
31 | 30 | from synapse.storage._base import db_to_json |
32 | 31 | from synapse.storage.database import ( |
67 | 66 | # to write account data. A value of `True` implies that `_account_data_id_gen` |
68 | 67 | # is an `AbstractStreamIdGenerator` and not just a tracker. |
69 | 68 | self._account_data_id_gen: AbstractStreamIdTracker |
69 | self._can_write_to_account_data = ( | |
70 | self._instance_name in hs.config.worker.writers.account_data | |
71 | ) | |
70 | 72 | |
71 | 73 | if isinstance(database.engine, PostgresEngine): |
72 | self._can_write_to_account_data = ( | |
73 | self._instance_name in hs.config.worker.writers.account_data | |
74 | ) | |
75 | ||
76 | 74 | self._account_data_id_gen = MultiWriterIdGenerator( |
77 | 75 | db_conn=db_conn, |
78 | 76 | db=database, |
94 | 92 | # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets |
95 | 93 | # updated over replication. (Multiple writers are not supported for |
96 | 94 | # SQLite). |
97 | if self._instance_name in hs.config.worker.writers.account_data: | |
98 | self._can_write_to_account_data = True | |
99 | self._account_data_id_gen = StreamIdGenerator( | |
100 | db_conn, | |
101 | "room_account_data", | |
102 | "stream_id", | |
103 | extra_tables=[("room_tags_revisions", "stream_id")], | |
104 | ) | |
105 | else: | |
106 | self._account_data_id_gen = SlavedIdTracker( | |
107 | db_conn, | |
108 | "room_account_data", | |
109 | "stream_id", | |
110 | extra_tables=[("room_tags_revisions", "stream_id")], | |
111 | ) | |
95 | self._account_data_id_gen = StreamIdGenerator( | |
96 | db_conn, | |
97 | "room_account_data", | |
98 | "stream_id", | |
99 | extra_tables=[("room_tags_revisions", "stream_id")], | |
100 | is_writer=self._instance_name in hs.config.worker.writers.account_data, | |
101 | ) | |
112 | 102 | |
113 | 103 | account_max = self.get_max_account_data_stream_id() |
114 | 104 | self._account_data_stream_cache = StreamChangeCache( |
458 | 448 | content_json = json_encoder.encode(content) |
459 | 449 | |
460 | 450 | async with self._account_data_id_gen.get_next() as next_id: |
461 | # no need to lock here as room_account_data has a unique constraint | |
462 | # on (user_id, room_id, account_data_type) so simple_upsert will | |
463 | # retry if there is a conflict. | |
464 | 451 | await self.db_pool.simple_upsert( |
465 | 452 | desc="add_room_account_data", |
466 | 453 | table="room_account_data", |
470 | 457 | "account_data_type": account_data_type, |
471 | 458 | }, |
472 | 459 | values={"stream_id": next_id, "content": content_json}, |
473 | lock=False, | |
474 | 460 | ) |
475 | 461 | |
476 | 462 | self._account_data_stream_cache.entity_has_changed(user_id, next_id) |
526 | 512 | ) -> None: |
527 | 513 | content_json = json_encoder.encode(content) |
528 | 514 | |
529 | # no need to lock here as account_data has a unique constraint on | |
530 | # (user_id, account_data_type) so simple_upsert will retry if | |
531 | # there is a conflict. | |
532 | 515 | self.db_pool.simple_upsert_txn( |
533 | 516 | txn, |
534 | 517 | table="account_data", |
535 | 518 | keyvalues={"user_id": user_id, "account_data_type": account_data_type}, |
536 | 519 | values={"stream_id": next_id, "content": content_json}, |
537 | lock=False, | |
538 | 520 | ) |
539 | 521 | |
540 | 522 | # Ignored users get denormalized into a separate table as an optimisation. |
19 | 19 | ApplicationService, |
20 | 20 | ApplicationServiceState, |
21 | 21 | AppServiceTransaction, |
22 | TransactionOneTimeKeyCounts, | |
22 | TransactionOneTimeKeysCount, | |
23 | 23 | TransactionUnusedFallbackKeys, |
24 | 24 | ) |
25 | 25 | from synapse.config.appservice import load_appservices |
259 | 259 | events: List[EventBase], |
260 | 260 | ephemeral: List[JsonDict], |
261 | 261 | to_device_messages: List[JsonDict], |
262 | one_time_key_counts: TransactionOneTimeKeyCounts, | |
262 | one_time_keys_count: TransactionOneTimeKeysCount, | |
263 | 263 | unused_fallback_keys: TransactionUnusedFallbackKeys, |
264 | 264 | device_list_summary: DeviceListUpdates, |
265 | 265 | ) -> AppServiceTransaction: |
272 | 272 | events: A list of persistent events to put in the transaction. |
273 | 273 | ephemeral: A list of ephemeral events to put in the transaction. |
274 | 274 | to_device_messages: A list of to-device messages to put in the transaction. |
275 | one_time_key_counts: Counts of remaining one-time keys for relevant | |
275 | one_time_keys_count: Counts of remaining one-time keys for relevant | |
276 | 276 | appservice devices in the transaction. |
277 | 277 | unused_fallback_keys: Lists of unused fallback keys for relevant |
278 | 278 | appservice devices in the transaction. |
298 | 298 | events=events, |
299 | 299 | ephemeral=ephemeral, |
300 | 300 | to_device_messages=to_device_messages, |
301 | one_time_key_counts=one_time_key_counts, | |
301 | one_time_keys_count=one_time_keys_count, | |
302 | 302 | unused_fallback_keys=unused_fallback_keys, |
303 | 303 | device_list_summary=device_list_summary, |
304 | 304 | ) |
378 | 378 | events=events, |
379 | 379 | ephemeral=[], |
380 | 380 | to_device_messages=[], |
381 | one_time_key_counts={}, | |
381 | one_time_keys_count={}, | |
382 | 382 | unused_fallback_keys={}, |
383 | 383 | device_list_summary=DeviceListUpdates(), |
384 | 384 | ) |
450 | 450 | table="application_services_state", |
451 | 451 | keyvalues={"as_id": service.id}, |
452 | 452 | values={f"{stream_type}_stream_id": pos}, |
453 | # no need to lock when emulating upsert: as_id is a unique key | |
454 | lock=False, | |
455 | 453 | desc="set_appservice_stream_type_pos", |
456 | 454 | ) |
457 | 455 |
258 | 258 | |
259 | 259 | if relates_to: |
260 | 260 | self._attempt_to_invalidate_cache("get_relations_for_event", (relates_to,)) |
261 | self._attempt_to_invalidate_cache("get_references_for_event", (relates_to,)) | |
261 | 262 | self._attempt_to_invalidate_cache( |
262 | 263 | "get_aggregation_groups_for_event", (relates_to,) |
263 | 264 | ) |
37 | 37 | whitelisted_homeserver, |
38 | 38 | ) |
39 | 39 | from synapse.metrics.background_process_metrics import wrap_as_background_process |
40 | from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker | |
41 | 40 | from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream |
42 | 41 | from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause |
43 | 42 | from synapse.storage.database import ( |
85 | 84 | ): |
86 | 85 | super().__init__(database, db_conn, hs) |
87 | 86 | |
88 | if hs.config.worker.worker_app is None: | |
89 | self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( | |
90 | db_conn, | |
91 | "device_lists_stream", | |
92 | "stream_id", | |
93 | extra_tables=[ | |
94 | ("user_signature_stream", "stream_id"), | |
95 | ("device_lists_outbound_pokes", "stream_id"), | |
96 | ("device_lists_changes_in_room", "stream_id"), | |
97 | ], | |
98 | ) | |
99 | else: | |
100 | self._device_list_id_gen = SlavedIdTracker( | |
101 | db_conn, | |
102 | "device_lists_stream", | |
103 | "stream_id", | |
104 | extra_tables=[ | |
105 | ("user_signature_stream", "stream_id"), | |
106 | ("device_lists_outbound_pokes", "stream_id"), | |
107 | ("device_lists_changes_in_room", "stream_id"), | |
108 | ], | |
109 | ) | |
87 | # In the worker store this is an ID tracker which we overwrite in the non-worker | |
88 | # class below that is used on the main process. | |
89 | self._device_list_id_gen: AbstractStreamIdTracker = StreamIdGenerator( | |
90 | db_conn, | |
91 | "device_lists_stream", | |
92 | "stream_id", | |
93 | extra_tables=[ | |
94 | ("user_signature_stream", "stream_id"), | |
95 | ("device_lists_outbound_pokes", "stream_id"), | |
96 | ("device_lists_changes_in_room", "stream_id"), | |
97 | ], | |
98 | is_writer=hs.config.worker.worker_app is None, | |
99 | ) | |
110 | 100 | |
111 | 101 | # Type-ignore: _device_list_id_gen is mixed in from either DataStore (as a |
112 | 102 | # StreamIdGenerator) or SlavedDataStore (as a SlavedIdTracker). |
534 | 524 | limit: Maximum number of device updates to return |
535 | 525 | |
536 | 526 | Returns: |
537 | List: List of device update tuples: | |
527 | List of device update tuples: | |
538 | 528 | - user_id |
539 | 529 | - device_id |
540 | 530 | - stream_id |
1450 | 1440 | self._remove_duplicate_outbound_pokes, |
1451 | 1441 | ) |
1452 | 1442 | |
1443 | self.db_pool.updates.register_background_index_update( | |
1444 | "device_lists_changes_in_room_by_room_index", | |
1445 | index_name="device_lists_changes_in_room_by_room_idx", | |
1446 | table="device_lists_changes_in_room", | |
1447 | columns=["room_id", "stream_id"], | |
1448 | ) | |
1449 | ||
1453 | 1450 | async def _drop_device_list_streams_non_unique_indexes( |
1454 | 1451 | self, progress: JsonDict, batch_size: int |
1455 | 1452 | ) -> int: |
1746 | 1743 | table="device_lists_remote_cache", |
1747 | 1744 | keyvalues={"user_id": user_id, "device_id": device_id}, |
1748 | 1745 | values={"content": json_encoder.encode(content)}, |
1749 | # we don't need to lock, because we assume we are the only thread | |
1750 | # updating this user's devices. | |
1751 | lock=False, | |
1752 | 1746 | ) |
1753 | 1747 | |
1754 | 1748 | txn.call_after(self._get_cached_user_device.invalidate, (user_id, device_id)) |
1762 | 1756 | table="device_lists_remote_extremeties", |
1763 | 1757 | keyvalues={"user_id": user_id}, |
1764 | 1758 | values={"stream_id": stream_id}, |
1765 | # again, we can assume we are the only thread updating this user's | |
1766 | # extremity. | |
1767 | lock=False, | |
1768 | 1759 | ) |
1769 | 1760 | |
1770 | 1761 | async def update_remote_device_list_cache( |
1817 | 1808 | table="device_lists_remote_extremeties", |
1818 | 1809 | keyvalues={"user_id": user_id}, |
1819 | 1810 | values={"stream_id": stream_id}, |
1820 | # we don't need to lock, because we can assume we are the only thread | |
1821 | # updating this user's extremity. | |
1822 | lock=False, | |
1823 | 1811 | ) |
1824 | 1812 | |
1825 | 1813 | async def add_device_change_to_streams( |
2017 | 2005 | ) |
2018 | 2006 | |
2019 | 2007 | async def get_uncoverted_outbound_room_pokes( |
2020 | self, limit: int = 10 | |
2008 | self, start_stream_id: int, start_room_id: str, limit: int = 10 | |
2021 | 2009 | ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]: |
2022 | 2010 | """Get device list changes by room that have not yet been handled and |
2023 | 2011 | written to `device_lists_outbound_pokes`. |
2024 | 2012 | |
2013 | Args: | |
2014 | start_stream_id: Together with `start_room_id`, indicates the position after | |
2015 | which to return device list changes. | |
2016 | start_room_id: Together with `start_stream_id`, indicates the position after | |
2017 | which to return device list changes. | |
2018 | limit: The maximum number of device list changes to return. | |
2019 | ||
2025 | 2020 | Returns: |
2026 | A list of user ID, device ID, room ID, stream ID and optional opentracing context. | |
2021 | A list of user ID, device ID, room ID, stream ID and optional opentracing | |
2022 | context, in order of ascending (stream ID, room ID). | |
2027 | 2023 | """ |
2028 | 2024 | |
2029 | 2025 | sql = """ |
2030 | 2026 | SELECT user_id, device_id, room_id, stream_id, opentracing_context |
2031 | 2027 | FROM device_lists_changes_in_room |
2032 | WHERE NOT converted_to_destinations | |
2033 | ORDER BY stream_id | |
2028 | WHERE | |
2029 | (stream_id, room_id) > (?, ?) AND | |
2030 | stream_id <= ? AND | |
2031 | NOT converted_to_destinations | |
2032 | ORDER BY stream_id ASC, room_id ASC | |
2034 | 2033 | LIMIT ? |
2035 | 2034 | """ |
2036 | 2035 | |
2037 | 2036 | def get_uncoverted_outbound_room_pokes_txn( |
2038 | 2037 | txn: LoggingTransaction, |
2039 | 2038 | ) -> List[Tuple[str, str, str, int, Optional[Dict[str, str]]]]: |
2040 | txn.execute(sql, (limit,)) | |
2039 | txn.execute( | |
2040 | sql, | |
2041 | ( | |
2042 | start_stream_id, | |
2043 | start_room_id, | |
2044 | # Avoid returning rows if there may be uncommitted device list | |
2045 | # changes with smaller stream IDs. | |
2046 | self._device_list_id_gen.get_current_token(), | |
2047 | limit, | |
2048 | ), | |
2049 | ) | |
2041 | 2050 | |
2042 | 2051 | return [ |
2043 | 2052 | ( |
2059 | 2068 | user_id: str, |
2060 | 2069 | device_id: str, |
2061 | 2070 | room_id: str, |
2062 | stream_id: Optional[int], | |
2063 | 2071 | hosts: Collection[str], |
2064 | 2072 | context: Optional[Dict[str, str]], |
2065 | 2073 | ) -> None: |
2066 | 2074 | """Queue the device update to be sent to the given set of hosts, |
2067 | 2075 | calculated from the room ID. |
2068 | ||
2069 | Marks the associated row in `device_lists_changes_in_room` as handled, | |
2070 | if `stream_id` is provided. | |
2071 | """ | |
2076 | """ | |
2077 | if not hosts: | |
2078 | return | |
2072 | 2079 | |
2073 | 2080 | def add_device_list_outbound_pokes_txn( |
2074 | 2081 | txn: LoggingTransaction, stream_ids: List[int] |
2075 | 2082 | ) -> None: |
2076 | if hosts: | |
2077 | self._add_device_outbound_poke_to_stream_txn( | |
2078 | txn, | |
2079 | user_id=user_id, | |
2080 | device_id=device_id, | |
2081 | hosts=hosts, | |
2082 | stream_ids=stream_ids, | |
2083 | context=context, | |
2084 | ) | |
2085 | ||
2086 | if stream_id: | |
2087 | self.db_pool.simple_update_txn( | |
2088 | txn, | |
2089 | table="device_lists_changes_in_room", | |
2090 | keyvalues={ | |
2091 | "user_id": user_id, | |
2092 | "device_id": device_id, | |
2093 | "stream_id": stream_id, | |
2094 | "room_id": room_id, | |
2095 | }, | |
2096 | updatevalues={"converted_to_destinations": True}, | |
2097 | ) | |
2098 | ||
2099 | if not hosts: | |
2100 | # If there are no hosts then we don't try and generate stream IDs. | |
2101 | return await self.db_pool.runInteraction( | |
2102 | "add_device_list_outbound_pokes", | |
2103 | add_device_list_outbound_pokes_txn, | |
2104 | [], | |
2083 | self._add_device_outbound_poke_to_stream_txn( | |
2084 | txn, | |
2085 | user_id=user_id, | |
2086 | device_id=device_id, | |
2087 | hosts=hosts, | |
2088 | stream_ids=stream_ids, | |
2089 | context=context, | |
2105 | 2090 | ) |
2106 | 2091 | |
2107 | 2092 | async with self._device_list_id_gen.get_next_mult(len(hosts)) as stream_ids: |
2165 | 2150 | "get_pending_remote_device_list_updates_for_room", |
2166 | 2151 | get_pending_remote_device_list_updates_for_room_txn, |
2167 | 2152 | ) |
2153 | ||
2154 | async def get_device_change_last_converted_pos(self) -> Tuple[int, str]: | |
2155 | """ | |
2156 | Get the position of the last row in `device_list_changes_in_room` that has been | |
2157 | converted to `device_lists_outbound_pokes`. | |
2158 | ||
2159 | Rows with a strictly greater position where `converted_to_destinations` is | |
2160 | `FALSE` have not been converted. | |
2161 | """ | |
2162 | ||
2163 | row = await self.db_pool.simple_select_one( | |
2164 | table="device_lists_changes_converted_stream_position", | |
2165 | keyvalues={}, | |
2166 | retcols=["stream_id", "room_id"], | |
2167 | desc="get_device_change_last_converted_pos", | |
2168 | ) | |
2169 | return row["stream_id"], row["room_id"] | |
2170 | ||
2171 | async def set_device_change_last_converted_pos( | |
2172 | self, | |
2173 | stream_id: int, | |
2174 | room_id: str, | |
2175 | ) -> None: | |
2176 | """ | |
2177 | Set the position of the last row in `device_list_changes_in_room` that has been | |
2178 | converted to `device_lists_outbound_pokes`. | |
2179 | """ | |
2180 | ||
2181 | await self.db_pool.simple_update_one( | |
2182 | table="device_lists_changes_converted_stream_position", | |
2183 | keyvalues={}, | |
2184 | updatevalues={"stream_id": stream_id, "room_id": room_id}, | |
2185 | desc="set_device_change_last_converted_pos", | |
2186 | ) |
390 | 390 | Returns: |
391 | 391 | A dict giving the info metadata for this backup version, with |
392 | 392 | fields including: |
393 | version(str) | |
394 | algorithm(str) | |
395 | auth_data(object): opaque dict supplied by the client | |
396 | etag(int): tag of the keys in the backup | |
393 | version (str) | |
394 | algorithm (str) | |
395 | auth_data (object): opaque dict supplied by the client | |
396 | etag (int): tag of the keys in the backup | |
397 | 397 | """ |
398 | 398 | |
399 | 399 | def _get_e2e_room_keys_version_info_txn(txn: LoggingTransaction) -> JsonDict: |
32 | 32 | |
33 | 33 | from synapse.api.constants import DeviceKeyAlgorithms |
34 | 34 | from synapse.appservice import ( |
35 | TransactionOneTimeKeyCounts, | |
35 | TransactionOneTimeKeysCount, | |
36 | 36 | TransactionUnusedFallbackKeys, |
37 | 37 | ) |
38 | 38 | from synapse.logging.opentracing import log_kv, set_tag, trace |
411 | 411 | """Retrieve a number of one-time keys for a user |
412 | 412 | |
413 | 413 | Args: |
414 | user_id(str): id of user to get keys for | |
415 | device_id(str): id of device to get keys for | |
416 | key_ids(list[str]): list of key ids (excluding algorithm) to | |
417 | retrieve | |
414 | user_id: id of user to get keys for | |
415 | device_id: id of device to get keys for | |
416 | key_ids: list of key ids (excluding algorithm) to retrieve | |
418 | 417 | |
419 | 418 | Returns: |
420 | 419 | A map from (algorithm, key_id) to json string for key |
514 | 513 | |
515 | 514 | async def count_bulk_e2e_one_time_keys_for_as( |
516 | 515 | self, user_ids: Collection[str] |
517 | ) -> TransactionOneTimeKeyCounts: | |
516 | ) -> TransactionOneTimeKeysCount: | |
518 | 517 | """ |
519 | 518 | Counts, in bulk, the one-time keys for all the users specified. |
520 | 519 | Intended to be used by application services for populating OTK counts in |
528 | 527 | |
529 | 528 | def _count_bulk_e2e_one_time_keys_txn( |
530 | 529 | txn: LoggingTransaction, |
531 | ) -> TransactionOneTimeKeyCounts: | |
530 | ) -> TransactionOneTimeKeysCount: | |
532 | 531 | user_in_where_clause, user_parameters = make_in_list_sql_clause( |
533 | 532 | self.database_engine, "user_id", user_ids |
534 | 533 | ) |
541 | 540 | """ |
542 | 541 | txn.execute(sql, user_parameters) |
543 | 542 | |
544 | result: TransactionOneTimeKeyCounts = {} | |
543 | result: TransactionOneTimeKeysCount = {} | |
545 | 544 | |
546 | 545 | for user_id, device_id, algorithm, count in txn: |
547 | 546 | # We deliberately construct empty dictionaries for |
1685 | 1685 | }, |
1686 | 1686 | insertion_values={}, |
1687 | 1687 | desc="insert_insertion_extremity", |
1688 | lock=False, | |
1689 | 1688 | ) |
1690 | 1689 | |
1691 | 1690 | async def insert_received_event_to_staging( |
1278 | 1278 | Pick the earliest non-outlier if there is one, else the earliest one. |
1279 | 1279 | |
1280 | 1280 | Args: |
1281 | events_and_contexts (list[(EventBase, EventContext)]): | |
1281 | events_and_contexts: | |
1282 | ||
1282 | 1283 | Returns: |
1283 | list[(EventBase, EventContext)]: filtered list | |
1284 | filtered list | |
1284 | 1285 | """ |
1285 | 1286 | new_events_and_contexts: OrderedDict[ |
1286 | 1287 | str, Tuple[EventBase, EventContext] |
1306 | 1307 | """Update min_depth for each room |
1307 | 1308 | |
1308 | 1309 | Args: |
1309 | txn (twisted.enterprise.adbapi.Connection): db connection | |
1310 | events_and_contexts (list[(EventBase, EventContext)]): events | |
1311 | we are persisting | |
1310 | txn: db connection | |
1311 | events_and_contexts: events we are persisting | |
1312 | 1312 | """ |
1313 | 1313 | depth_updates: Dict[str, int] = {} |
1314 | 1314 | for event, context in events_and_contexts: |
1579 | 1579 | """Update all the miscellaneous tables for new events |
1580 | 1580 | |
1581 | 1581 | Args: |
1582 | txn (twisted.enterprise.adbapi.Connection): db connection | |
1583 | events_and_contexts (list[(EventBase, EventContext)]): events | |
1584 | we are persisting | |
1585 | all_events_and_contexts (list[(EventBase, EventContext)]): all | |
1586 | events that we were going to persist. This includes events | |
1587 | we've already persisted, etc, that wouldn't appear in | |
1588 | events_and_context. | |
1582 | txn: db connection | |
1583 | events_and_contexts: events we are persisting | |
1584 | all_events_and_contexts: all events that we were going to persist. | |
1585 | This includes events we've already persisted, etc, that wouldn't | |
1586 | appear in events_and_context. | |
1589 | 1587 | inhibit_local_membership_updates: Stop the local_current_membership |
1590 | 1588 | from being updated by these events. This should be set to True |
1591 | 1589 | for backfilled events because backfilled events in the past do |
2050 | 2048 | self.store._invalidate_cache_and_stream( |
2051 | 2049 | txn, self.store.get_aggregation_groups_for_event, (redacted_relates_to,) |
2052 | 2050 | ) |
2051 | if rel_type == RelationTypes.REFERENCE: | |
2052 | self.store._invalidate_cache_and_stream( | |
2053 | txn, self.store.get_references_for_event, (redacted_relates_to,) | |
2054 | ) | |
2053 | 2055 | if rel_type == RelationTypes.REPLACE: |
2054 | 2056 | self.store._invalidate_cache_and_stream( |
2055 | 2057 | txn, self.store.get_applicable_edit, (redacted_relates_to,) |
58 | 58 | run_as_background_process, |
59 | 59 | wrap_as_background_process, |
60 | 60 | ) |
61 | from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker | |
62 | 61 | from synapse.replication.tcp.streams import BackfillStream |
63 | 62 | from synapse.replication.tcp.streams.events import EventsStream |
64 | 63 | from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause |
212 | 211 | # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets |
213 | 212 | # updated over replication. (Multiple writers are not supported for |
214 | 213 | # SQLite). |
215 | if hs.get_instance_name() in hs.config.worker.writers.events: | |
216 | self._stream_id_gen = StreamIdGenerator( | |
217 | db_conn, | |
218 | "events", | |
219 | "stream_ordering", | |
220 | ) | |
221 | self._backfill_id_gen = StreamIdGenerator( | |
222 | db_conn, | |
223 | "events", | |
224 | "stream_ordering", | |
225 | step=-1, | |
226 | extra_tables=[("ex_outlier_stream", "event_stream_ordering")], | |
227 | ) | |
228 | else: | |
229 | self._stream_id_gen = SlavedIdTracker( | |
230 | db_conn, "events", "stream_ordering" | |
231 | ) | |
232 | self._backfill_id_gen = SlavedIdTracker( | |
233 | db_conn, "events", "stream_ordering", step=-1 | |
234 | ) | |
214 | self._stream_id_gen = StreamIdGenerator( | |
215 | db_conn, | |
216 | "events", | |
217 | "stream_ordering", | |
218 | is_writer=hs.get_instance_name() in hs.config.worker.writers.events, | |
219 | ) | |
220 | self._backfill_id_gen = StreamIdGenerator( | |
221 | db_conn, | |
222 | "events", | |
223 | "stream_ordering", | |
224 | step=-1, | |
225 | extra_tables=[("ex_outlier_stream", "event_stream_ordering")], | |
226 | is_writer=hs.get_instance_name() in hs.config.worker.writers.events, | |
227 | ) | |
235 | 228 | |
236 | 229 | events_max = self._stream_id_gen.get_current_token() |
237 | 230 | curr_state_delta_prefill, min_curr_state_delta_id = self.db_pool.get_cache_dict( |
1588 | 1581 | room_id: The room ID to query. |
1589 | 1582 | |
1590 | 1583 | Returns: |
1591 | dict[str:float] of complexity version to complexity. | |
1584 | Map of complexity version to complexity. | |
1592 | 1585 | """ |
1593 | 1586 | state_events = await self.get_current_state_event_counts(room_id) |
1594 | 1587 |
216 | 216 | def _reap_users(txn: LoggingTransaction, reserved_users: List[str]) -> None: |
217 | 217 | """ |
218 | 218 | Args: |
219 | reserved_users (tuple): reserved users to preserve | |
219 | reserved_users: reserved users to preserve | |
220 | 220 | """ |
221 | 221 | |
222 | 222 | thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30) |
369 | 369 | should not appear in the MAU stats). |
370 | 370 | |
371 | 371 | Args: |
372 | txn (cursor): | |
373 | user_id (str): user to add/update | |
372 | txn: | |
373 | user_id: user to add/update | |
374 | 374 | """ |
375 | 375 | assert ( |
376 | 376 | self._update_on_this_worker |
400 | 400 | add the user to the monthly active tables |
401 | 401 | |
402 | 402 | Args: |
403 | user_id(str): the user_id to query | |
403 | user_id: the user_id to query | |
404 | 404 | """ |
405 | 405 | assert ( |
406 | 406 | self._update_on_this_worker |
29 | 29 | |
30 | 30 | from synapse.api.errors import StoreError |
31 | 31 | from synapse.config.homeserver import ExperimentalConfig |
32 | from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker | |
33 | 32 | from synapse.replication.tcp.streams import PushRulesStream |
34 | 33 | from synapse.storage._base import SQLBaseStore |
35 | 34 | from synapse.storage.database import ( |
84 | 83 | push_rules = PushRules(ruleslist) |
85 | 84 | |
86 | 85 | filtered_rules = FilteredPushRules( |
87 | push_rules, enabled_map, msc3664_enabled=experimental_config.msc3664_enabled | |
86 | push_rules, | |
87 | enabled_map, | |
88 | msc3664_enabled=experimental_config.msc3664_enabled, | |
89 | msc1767_enabled=experimental_config.msc1767_enabled, | |
88 | 90 | ) |
89 | 91 | |
90 | 92 | return filtered_rules |
110 | 112 | ): |
111 | 113 | super().__init__(database, db_conn, hs) |
112 | 114 | |
113 | if hs.config.worker.worker_app is None: | |
114 | self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( | |
115 | db_conn, "push_rules_stream", "stream_id" | |
116 | ) | |
117 | else: | |
118 | self._push_rules_stream_id_gen = SlavedIdTracker( | |
119 | db_conn, "push_rules_stream", "stream_id" | |
120 | ) | |
115 | # In the worker store this is an ID tracker which we overwrite in the non-worker | |
116 | # class below that is used on the main process. | |
117 | self._push_rules_stream_id_gen: AbstractStreamIdTracker = StreamIdGenerator( | |
118 | db_conn, | |
119 | "push_rules_stream", | |
120 | "stream_id", | |
121 | is_writer=hs.config.worker.worker_app is None, | |
122 | ) | |
121 | 123 | |
122 | 124 | push_rules_prefill, push_rules_id = self.db_pool.get_cache_dict( |
123 | 125 | db_conn, |
26 | 26 | ) |
27 | 27 | |
28 | 28 | from synapse.push import PusherConfig, ThrottleParams |
29 | from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker | |
30 | 29 | from synapse.replication.tcp.streams import PushersStream |
31 | 30 | from synapse.storage._base import SQLBaseStore, db_to_json |
32 | 31 | from synapse.storage.database import ( |
58 | 57 | ): |
59 | 58 | super().__init__(database, db_conn, hs) |
60 | 59 | |
61 | if hs.config.worker.worker_app is None: | |
62 | self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( | |
63 | db_conn, | |
64 | "pushers", | |
65 | "id", | |
66 | extra_tables=[("deleted_pushers", "stream_id")], | |
67 | ) | |
68 | else: | |
69 | self._pushers_id_gen = SlavedIdTracker( | |
70 | db_conn, | |
71 | "pushers", | |
72 | "id", | |
73 | extra_tables=[("deleted_pushers", "stream_id")], | |
74 | ) | |
60 | # In the worker store this is an ID tracker which we overwrite in the non-worker | |
61 | # class below that is used on the main process. | |
62 | self._pushers_id_gen: AbstractStreamIdTracker = StreamIdGenerator( | |
63 | db_conn, | |
64 | "pushers", | |
65 | "id", | |
66 | extra_tables=[("deleted_pushers", "stream_id")], | |
67 | is_writer=hs.config.worker.worker_app is None, | |
68 | ) | |
75 | 69 | |
76 | 70 | self.db_pool.updates.register_background_update_handler( |
77 | 71 | "remove_deactivated_pushers", |
330 | 324 | async def set_throttle_params( |
331 | 325 | self, pusher_id: str, room_id: str, params: ThrottleParams |
332 | 326 | ) -> None: |
333 | # no need to lock because `pusher_throttle` has a primary key on | |
334 | # (pusher, room_id) so simple_upsert will retry | |
335 | 327 | await self.db_pool.simple_upsert( |
336 | 328 | "pusher_throttle", |
337 | 329 | {"pusher": pusher_id, "room_id": room_id}, |
338 | 330 | {"last_sent_ts": params.last_sent_ts, "throttle_ms": params.throttle_ms}, |
339 | 331 | desc="set_throttle_params", |
340 | lock=False, | |
341 | 332 | ) |
342 | 333 | |
343 | 334 | async def _remove_deactivated_pushers(self, progress: dict, batch_size: int) -> int: |
594 | 585 | device_id: Optional[str] = None, |
595 | 586 | ) -> None: |
596 | 587 | async with self._pushers_id_gen.get_next() as stream_id: |
597 | # no need to lock because `pushers` has a unique key on | |
598 | # (app_id, pushkey, user_name) so simple_upsert will retry | |
599 | 588 | await self.db_pool.simple_upsert( |
600 | 589 | table="pushers", |
601 | 590 | keyvalues={"app_id": app_id, "pushkey": pushkey, "user_name": user_id}, |
614 | 603 | "device_id": device_id, |
615 | 604 | }, |
616 | 605 | desc="add_pusher", |
617 | lock=False, | |
618 | 606 | ) |
619 | 607 | |
620 | 608 | user_has_pusher = self.get_if_user_has_pusher.cache.get_immediate( |
26 | 26 | ) |
27 | 27 | |
28 | 28 | from synapse.api.constants import EduTypes |
29 | from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker | |
30 | 29 | from synapse.replication.tcp.streams import ReceiptsStream |
31 | 30 | from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause |
32 | 31 | from synapse.storage.database import ( |
60 | 59 | hs: "HomeServer", |
61 | 60 | ): |
62 | 61 | self._instance_name = hs.get_instance_name() |
62 | ||
63 | # In the worker store this is an ID tracker which we overwrite in the non-worker | |
64 | # class below that is used on the main process. | |
63 | 65 | self._receipts_id_gen: AbstractStreamIdTracker |
64 | 66 | |
65 | 67 | if isinstance(database.engine, PostgresEngine): |
86 | 88 | # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets |
87 | 89 | # updated over replication. (Multiple writers are not supported for |
88 | 90 | # SQLite). |
89 | if hs.get_instance_name() in hs.config.worker.writers.receipts: | |
90 | self._receipts_id_gen = StreamIdGenerator( | |
91 | db_conn, "receipts_linearized", "stream_id" | |
92 | ) | |
93 | else: | |
94 | self._receipts_id_gen = SlavedIdTracker( | |
95 | db_conn, "receipts_linearized", "stream_id" | |
96 | ) | |
91 | self._receipts_id_gen = StreamIdGenerator( | |
92 | db_conn, | |
93 | "receipts_linearized", | |
94 | "stream_id", | |
95 | is_writer=hs.get_instance_name() in hs.config.worker.writers.receipts, | |
96 | ) | |
97 | 97 | |
98 | 98 | super().__init__(database, db_conn, hs) |
99 | 99 |
952 | 952 | """Returns user id from threepid |
953 | 953 | |
954 | 954 | Args: |
955 | txn (cursor): | |
955 | txn: | |
956 | 956 | medium: threepid medium e.g. email |
957 | 957 | address: threepid address e.g. me@example.com |
958 | 958 | |
1282 | 1282 | """Sets an expiration date to the account with the given user ID. |
1283 | 1283 | |
1284 | 1284 | Args: |
1285 | user_id (str): User ID to set an expiration date for. | |
1286 | use_delta (bool): If set to False, the expiration date for the user will be | |
1285 | user_id: User ID to set an expiration date for. | |
1286 | use_delta: If set to False, the expiration date for the user will be | |
1287 | 1287 | now + validity period. If set to True, this expiration date will be a |
1288 | 1288 | random value in the [now + period - d ; now + period] range, d being a |
1289 | 1289 | delta equal to 10% of the validity period. |
19 | 19 | FrozenSet, |
20 | 20 | Iterable, |
21 | 21 | List, |
22 | Mapping, | |
22 | 23 | Optional, |
23 | 24 | Set, |
24 | 25 | Tuple, |
80 | 81 | event_id: str |
81 | 82 | # The sender of the related event. |
82 | 83 | sender: str |
83 | topological_ordering: Optional[int] | |
84 | stream_ordering: int | |
85 | 84 | |
86 | 85 | |
87 | 86 | class RelationsWorkerStore(SQLBaseStore): |
244 | 243 | txn.execute(sql, where_args + [limit + 1]) |
245 | 244 | |
246 | 245 | events = [] |
247 | for event_id, relation_type, sender, topo_ordering, stream_ordering in txn: | |
246 | topo_orderings: List[int] = [] | |
247 | stream_orderings: List[int] = [] | |
248 | for event_id, relation_type, sender, topo_ordering, stream_ordering in cast( | |
249 | List[Tuple[str, str, str, int, int]], txn | |
250 | ): | |
248 | 251 | # Do not include edits for redacted events as they leak event |
249 | 252 | # content. |
250 | 253 | if not is_redacted or relation_type != RelationTypes.REPLACE: |
251 | events.append( | |
252 | _RelatedEvent(event_id, sender, topo_ordering, stream_ordering) | |
253 | ) | |
254 | events.append(_RelatedEvent(event_id, sender)) | |
255 | topo_orderings.append(topo_ordering) | |
256 | stream_orderings.append(stream_ordering) | |
254 | 257 | |
255 | 258 | # If there are more events, generate the next pagination key from the |
256 | 259 | # last event returned. |
259 | 262 | # Instead of using the last row (which tells us there is more |
260 | 263 | # data), use the last row to be returned. |
261 | 264 | events = events[:limit] |
262 | ||
263 | topo = events[-1].topological_ordering | |
264 | token = events[-1].stream_ordering | |
265 | topo_orderings = topo_orderings[:limit] | |
266 | stream_orderings = stream_orderings[:limit] | |
267 | ||
268 | topo = topo_orderings[-1] | |
269 | token = stream_orderings[-1] | |
265 | 270 | if direction == "b": |
266 | 271 | # Tokens are positions between events. |
267 | 272 | # This token points *after* the last event in the chunk. |
393 | 398 | ) |
394 | 399 | return result is not None |
395 | 400 | |
396 | @cached(tree=True) | |
397 | async def get_aggregation_groups_for_event( | |
398 | self, event_id: str, room_id: str, limit: int = 5 | |
399 | ) -> List[JsonDict]: | |
400 | """Get a list of annotations on the event, grouped by event type and | |
401 | @cached() | |
402 | async def get_aggregation_groups_for_event(self, event_id: str) -> List[JsonDict]: | |
403 | raise NotImplementedError() | |
404 | ||
405 | @cachedList( | |
406 | cached_method_name="get_aggregation_groups_for_event", list_name="event_ids" | |
407 | ) | |
408 | async def get_aggregation_groups_for_events( | |
409 | self, event_ids: Collection[str] | |
410 | ) -> Mapping[str, Optional[List[JsonDict]]]: | |
411 | """Get a list of annotations on the given events, grouped by event type and | |
401 | 412 | aggregation key, sorted by count. |
402 | 413 | |
403 | 414 | This is used e.g. to get what and how many reactions have happened
404 | 415 | on an event. |
405 | 416 | |
406 | 417 | Args: |
407 | event_id: Fetch events that relate to this event ID. | |
408 | room_id: The room the event belongs to. | |
409 | limit: Only fetch the `limit` groups. | |
410 | ||
411 | Returns: | |
412 | List of groups of annotations that match. Each row is a dict with | |
413 | `type`, `key` and `count` fields. | |
414 | """ | |
415 | ||
416 | args = [ | |
417 | event_id, | |
418 | room_id, | |
419 | RelationTypes.ANNOTATION, | |
420 | limit, | |
421 | ] | |
422 | ||
423 | sql = """ | |
424 | SELECT type, aggregation_key, COUNT(DISTINCT sender) | |
425 | FROM event_relations | |
426 | INNER JOIN events USING (event_id) | |
427 | WHERE relates_to_id = ? AND room_id = ? AND relation_type = ? | |
428 | GROUP BY relation_type, type, aggregation_key | |
429 | ORDER BY COUNT(*) DESC | |
430 | LIMIT ? | |
431 | """ | |
432 | ||
433 | def _get_aggregation_groups_for_event_txn( | |
418 | event_ids: Fetch events that relate to these event IDs. | |
419 | ||
420 | Returns: | |
421 | A map of event IDs to a list of groups of annotations that match. | |
422 | Each entry is a dict with `type`, `key` and `count` fields. | |
423 | """ | |
424 | # The number of entries to return per event ID. | |
425 | limit = 5 | |
426 | ||
427 | clause, args = make_in_list_sql_clause( | |
428 | self.database_engine, "relates_to_id", event_ids | |
429 | ) | |
430 | args.append(RelationTypes.ANNOTATION) | |
431 | ||
432 | sql = f""" | |
433 | SELECT | |
434 | relates_to_id, | |
435 | annotation.type, | |
436 | aggregation_key, | |
437 | COUNT(DISTINCT annotation.sender) | |
438 | FROM events AS annotation | |
439 | INNER JOIN event_relations USING (event_id) | |
440 | INNER JOIN events AS parent ON | |
441 | parent.event_id = relates_to_id | |
442 | AND parent.room_id = annotation.room_id | |
443 | WHERE | |
444 | {clause} | |
445 | AND relation_type = ? | |
446 | GROUP BY relates_to_id, annotation.type, aggregation_key | |
447 | ORDER BY relates_to_id, COUNT(*) DESC | |
448 | """ | |
449 | ||
450 | def _get_aggregation_groups_for_events_txn( | |
434 | 451 | txn: LoggingTransaction, |
435 | ) -> List[JsonDict]: | |
452 | ) -> Mapping[str, List[JsonDict]]: | |
436 | 453 | txn.execute(sql, args) |
437 | 454 | |
438 | return [{"type": row[0], "key": row[1], "count": row[2]} for row in txn] | |
455 | result: Dict[str, List[JsonDict]] = {} | |
456 | for event_id, type, key, count in cast( | |
457 | List[Tuple[str, str, str, int]], txn | |
458 | ): | |
459 | event_results = result.setdefault(event_id, []) | |
460 | ||
461 | # Limit the number of results per event ID. | |
462 | if len(event_results) == limit: | |
463 | continue | |
464 | ||
465 | event_results.append({"type": type, "key": key, "count": count}) | |
466 | ||
467 | return result | |
439 | 468 | |
440 | 469 | return await self.db_pool.runInteraction( |
441 | "get_aggregation_groups_for_event", _get_aggregation_groups_for_event_txn | |
470 | "get_aggregation_groups_for_events", _get_aggregation_groups_for_events_txn | |
442 | 471 | ) |
443 | 472 | |
444 | 473 | async def get_aggregation_groups_for_users( |
445 | self, | |
446 | event_id: str, | |
447 | room_id: str, | |
448 | limit: int, | |
449 | users: FrozenSet[str] = frozenset(), | |
450 | ) -> Dict[Tuple[str, str], int]: | |
474 | self, event_ids: Collection[str], users: FrozenSet[str] | |
475 | ) -> Dict[str, Dict[Tuple[str, str], int]]: | |
451 | 476 | """Fetch the partial aggregations for an event for specific users. |
452 | 477 | |
453 | 478 | This is used, in conjunction with get_aggregation_groups_for_event, to |
454 | 479 | remove information from the results for ignored users. |
455 | 480 | |
456 | 481 | Args: |
457 | event_id: Fetch events that relate to this event ID. | |
458 | room_id: The room the event belongs to. | |
459 | limit: Only fetch the `limit` groups. | |
482 | event_ids: Fetch events that relate to these event IDs. | |
460 | 483 | users: The users to fetch information for. |
461 | 484 | |
462 | 485 | Returns: |
463 | A map of (event type, aggregation key) to a count of users. | |
486 | A map of event ID to a map of (event type, aggregation key) to a | |
487 | count of users. | |
464 | 488 | """ |
465 | 489 | |
466 | 490 | if not users: |
467 | 491 | return {} |
468 | 492 | |
469 | args: List[Union[str, int]] = [ | |
470 | event_id, | |
471 | room_id, | |
472 | RelationTypes.ANNOTATION, | |
473 | ] | |
493 | events_sql, args = make_in_list_sql_clause( | |
494 | self.database_engine, "relates_to_id", event_ids | |
495 | ) | |
474 | 496 | |
475 | 497 | users_sql, users_args = make_in_list_sql_clause( |
476 | self.database_engine, "sender", users | |
498 | self.database_engine, "annotation.sender", users | |
477 | 499 | ) |
478 | 500 | args.extend(users_args) |
501 | args.append(RelationTypes.ANNOTATION) | |
479 | 502 | |
480 | 503 | sql = f""" |
481 | SELECT type, aggregation_key, COUNT(DISTINCT sender) | |
482 | FROM event_relations | |
483 | INNER JOIN events USING (event_id) | |
484 | WHERE relates_to_id = ? AND room_id = ? AND relation_type = ? AND {users_sql} | |
485 | GROUP BY relation_type, type, aggregation_key | |
486 | ORDER BY COUNT(*) DESC | |
487 | LIMIT ? | |
504 | SELECT | |
505 | relates_to_id, | |
506 | annotation.type, | |
507 | aggregation_key, | |
508 | COUNT(DISTINCT annotation.sender) | |
509 | FROM events AS annotation | |
510 | INNER JOIN event_relations USING (event_id) | |
511 | INNER JOIN events AS parent ON | |
512 | parent.event_id = relates_to_id | |
513 | AND parent.room_id = annotation.room_id | |
514 | WHERE {events_sql} AND {users_sql} AND relation_type = ? | |
515 | GROUP BY relates_to_id, annotation.type, aggregation_key | |
516 | ORDER BY relates_to_id, COUNT(*) DESC | |
488 | 517 | """ |
489 | 518 | |
490 | 519 | def _get_aggregation_groups_for_users_txn( |
491 | 520 | txn: LoggingTransaction, |
492 | ) -> Dict[Tuple[str, str], int]: | |
493 | txn.execute(sql, args + [limit]) | |
494 | ||
495 | return {(row[0], row[1]): row[2] for row in txn} | |
521 | ) -> Dict[str, Dict[Tuple[str, str], int]]: | |
522 | txn.execute(sql, args) | |
523 | ||
524 | result: Dict[str, Dict[Tuple[str, str], int]] = {} | |
525 | for event_id, type, key, count in cast( | |
526 | List[Tuple[str, str, str, int]], txn | |
527 | ): | |
528 | result.setdefault(event_id, {})[(type, key)] = count | |
529 | ||
530 | return result | |
496 | 531 | |
497 | 532 | return await self.db_pool.runInteraction( |
498 | 533 | "get_aggregation_groups_for_users", _get_aggregation_groups_for_users_txn |
534 | ) | |
535 | ||
536 | @cached() | |
537 | async def get_references_for_event(self, event_id: str) -> List[JsonDict]: | |
538 | raise NotImplementedError() | |
539 | ||
540 | @cachedList(cached_method_name="get_references_for_event", list_name="event_ids") | |
541 | async def get_references_for_events( | |
542 | self, event_ids: Collection[str] | |
543 | ) -> Mapping[str, Optional[List[_RelatedEvent]]]: | |
544 | """Get a list of references to the given events. | |
545 | ||
546 | Args: | |
547 | event_ids: Fetch events that relate to these event IDs. | |
548 | ||
549 | Returns: | |
550 | A map of event IDs to a list of related event IDs (and their senders). | |
551 | """ | |
552 | ||
553 | clause, args = make_in_list_sql_clause( | |
554 | self.database_engine, "relates_to_id", event_ids | |
555 | ) | |
556 | args.append(RelationTypes.REFERENCE) | |
557 | ||
558 | sql = f""" | |
559 | SELECT relates_to_id, ref.event_id, ref.sender | |
560 | FROM events AS ref | |
561 | INNER JOIN event_relations USING (event_id) | |
562 | INNER JOIN events AS parent ON | |
563 | parent.event_id = relates_to_id | |
564 | AND parent.room_id = ref.room_id | |
565 | WHERE | |
566 | {clause} | |
567 | AND relation_type = ? | |
568 | ORDER BY ref.topological_ordering, ref.stream_ordering | |
569 | """ | |
570 | ||
571 | def _get_references_for_events_txn( | |
572 | txn: LoggingTransaction, | |
573 | ) -> Mapping[str, List[_RelatedEvent]]: | |
574 | txn.execute(sql, args) | |
575 | ||
576 | result: Dict[str, List[_RelatedEvent]] = {} | |
577 | for relates_to_id, event_id, sender in cast( | |
578 | List[Tuple[str, str, str]], txn | |
579 | ): | |
580 | result.setdefault(relates_to_id, []).append( | |
581 | _RelatedEvent(event_id, sender) | |
582 | ) | |
583 | ||
584 | return result | |
585 | ||
586 | return await self.db_pool.runInteraction( | |
587 | "_get_references_for_events_txn", _get_references_for_events_txn | |
499 | 588 | ) |
500 | 589 | |
501 | 590 | @cached() |
911 | 911 | event_json = db_to_json(content_json) |
912 | 912 | content = event_json["content"] |
913 | 913 | content_url = content.get("url") |
914 | thumbnail_url = content.get("info", {}).get("thumbnail_url") | |
914 | info = content.get("info") | |
915 | if isinstance(info, dict): | |
916 | thumbnail_url = info.get("thumbnail_url") | |
917 | else: | |
918 | thumbnail_url = None | |
915 | 919 | |
916 | 920 | for url in (content_url, thumbnail_url): |
917 | 921 | if not url: |
1842 | 1846 | "creator": room_creator, |
1843 | 1847 | "has_auth_chain_index": has_auth_chain_index, |
1844 | 1848 | }, |
1845 | # rooms has a unique constraint on room_id, so no need to lock when doing an | |
1846 | # emulated upsert. | |
1847 | lock=False, | |
1848 | 1849 | ) |
1849 | 1850 | |
1850 | 1851 | async def store_partial_state_room( |
1965 | 1966 | "creator": "", |
1966 | 1967 | "has_auth_chain_index": has_auth_chain_index, |
1967 | 1968 | }, |
1968 | # rooms has a unique constraint on room_id, so no need to lock when doing an | |
1969 | # emulated upsert. | |
1970 | lock=False, | |
1971 | 1969 | ) |
1972 | 1970 | |
1973 | 1971 | async def set_room_is_public(self, room_id: str, is_public: bool) -> None: |
2056 | 2054 | Args: |
2057 | 2055 | report_id: ID of reported event in database |
2058 | 2056 | Returns: |
2059 | event_report: json list of information from event report | |
2057 | JSON dict of information from an event report or None if the | |
2058 | report does not exist. | |
2060 | 2059 | """ |
2061 | 2060 | |
2062 | 2061 | def _get_event_report_txn( |
2129 | 2128 | user_id: search for user_id. Ignored if user_id is None |
2130 | 2129 | room_id: search for room_id. Ignored if room_id is None |
2131 | 2130 | Returns: |
2132 | event_reports: json list of event reports | |
2133 | count: total number of event reports matching the filter criteria | |
2131 | Tuple of: | |
2132 | json list of event reports | |
2133 | total number of event reports matching the filter criteria | |
2134 | 2134 | """ |
2135 | 2135 | |
2136 | 2136 | def _get_event_reports_paginate_txn( |
43 | 43 | table="event_to_state_groups", |
44 | 44 | keyvalues={"event_id": event_id}, |
45 | 45 | values={"state_group": state_group_id, "event_id": event_id}, |
46 | # Unique constraint on event_id so we don't have to lock | |
47 | lock=False, | |
48 | 46 | ) |
184 | 184 | - who should be in the user_directory. |
185 | 185 | |
186 | 186 | Args: |
187 | progress (dict) | |
188 | batch_size (int): Maximum number of state events to process | |
189 | per cycle. | |
187 | progress | |
188 | batch_size: Maximum number of state events to process per cycle. | |
190 | 189 | |
191 | 190 | Returns: |
192 | 191 | number of events processed. |
481 | 480 | table="user_directory", |
482 | 481 | keyvalues={"user_id": user_id}, |
483 | 482 | values={"display_name": display_name, "avatar_url": avatar_url}, |
484 | lock=False, # We're only inserter | |
485 | 483 | ) |
486 | 484 | |
487 | 485 | if isinstance(self.database_engine, PostgresEngine): |
511 | 509 | table="user_directory_search", |
512 | 510 | keyvalues={"user_id": user_id}, |
513 | 511 | values={"value": value}, |
514 | lock=False, # We're only inserter | |
515 | 512 | ) |
516 | 513 | else: |
517 | 514 | # This should be unreachable. |
707 | 704 | Returns the rooms that a user is in. |
708 | 705 | |
709 | 706 | Args: |
710 | user_id(str): Must be a local user | |
707 | user_id: Must be a local user | |
711 | 708 | |
712 | 709 | Returns: |
713 | list: user_id | |
710 | List of room IDs | |
714 | 711 | """ |
715 | 712 | rows = await self.db_pool.simple_select_onecol( |
716 | 713 | table="users_who_share_private_rooms", |
92 | 92 | |
93 | 93 | results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups} |
94 | 94 | |
95 | where_clause, where_args = state_filter.make_sql_filter_clause() | |
96 | ||
97 | # Unless the filter clause is empty, we're going to append it after an | |
98 | # existing where clause | |
99 | if where_clause: | |
100 | where_clause = " AND (%s)" % (where_clause,) | |
101 | ||
102 | 95 | if isinstance(self.database_engine, PostgresEngine): |
103 | 96 | # Temporarily disable sequential scans in this transaction. This is |
104 | 97 | # a temporary hack until we can add the right indices in |
109 | 102 | # against `state_groups_state` to fetch the latest state. |
110 | 103 | # It assumes that previous state groups are always numerically |
111 | 104 | # lesser. |
112 | # The PARTITION is used to get the event_id in the greatest state | |
113 | # group for the given type, state_key. | |
114 | 105 | # This may return multiple rows per (type, state_key), but last_value |
115 | 106 | # should be the same. |
116 | 107 | sql = """ |
117 | WITH RECURSIVE state(state_group) AS ( | |
108 | WITH RECURSIVE sgs(state_group) AS ( | |
118 | 109 | VALUES(?::bigint) |
119 | 110 | UNION ALL |
120 | SELECT prev_state_group FROM state_group_edges e, state s | |
111 | SELECT prev_state_group FROM state_group_edges e, sgs s | |
121 | 112 | WHERE s.state_group = e.state_group |
122 | 113 | ) |
123 | SELECT DISTINCT ON (type, state_key) | |
124 | type, state_key, event_id | |
125 | FROM state_groups_state | |
126 | WHERE state_group IN ( | |
127 | SELECT state_group FROM state | |
128 | ) %s | |
129 | ORDER BY type, state_key, state_group DESC | |
114 | %s | |
130 | 115 | """ |
116 | ||
117 | overall_select_query_args: List[Union[int, str]] = [] | |
118 | ||
119 | # This is an optimization to create a select clause per-condition. This | |
120 | # makes the query planner a lot smarter on what rows should pull out in the | |
121 | # first place and we end up with something that takes 10x less time to get a | |
122 | # result. | |
123 | use_condition_optimization = ( | |
124 | not state_filter.include_others and not state_filter.is_full() | |
125 | ) | |
126 | state_filter_condition_combos: List[Tuple[str, Optional[str]]] = [] | |
127 | # We don't need to calculate this list if we're not using the condition | 
128 | # optimization | |
129 | if use_condition_optimization: | |
130 | for etype, state_keys in state_filter.types.items(): | |
131 | if state_keys is None: | |
132 | state_filter_condition_combos.append((etype, None)) | |
133 | else: | |
134 | for state_key in state_keys: | |
135 | state_filter_condition_combos.append((etype, state_key)) | |
136 | # And here is the optimization itself. We don't want to do the optimization | |
137 | # if there are too many individual conditions. 10 is an arbitrary number | |
138 | # with no testing behind it but we do know that we specifically made this | |
139 | # optimization for when we grab the necessary state out for | |
140 | # `filter_events_for_client` which just uses 2 conditions | |
141 | # (`EventTypes.RoomHistoryVisibility` and `EventTypes.Member`). | |
142 | if use_condition_optimization and len(state_filter_condition_combos) < 10: | |
143 | select_clause_list: List[str] = [] | |
144 | for etype, skey in state_filter_condition_combos: | |
145 | if skey is None: | |
146 | where_clause = "(type = ?)" | |
147 | overall_select_query_args.extend([etype]) | |
148 | else: | |
149 | where_clause = "(type = ? AND state_key = ?)" | |
150 | overall_select_query_args.extend([etype, skey]) | |
151 | ||
152 | select_clause_list.append( | |
153 | f""" | |
154 | ( | |
155 | SELECT DISTINCT ON (type, state_key) | |
156 | type, state_key, event_id | |
157 | FROM state_groups_state | |
158 | INNER JOIN sgs USING (state_group) | |
159 | WHERE {where_clause} | |
160 | ORDER BY type, state_key, state_group DESC | |
161 | ) | |
162 | """ | |
163 | ) | |
164 | ||
165 | overall_select_clause = " UNION ".join(select_clause_list) | |
166 | else: | |
167 | where_clause, where_args = state_filter.make_sql_filter_clause() | |
168 | # Unless the filter clause is empty, we're going to append it after an | |
169 | # existing where clause | |
170 | if where_clause: | |
171 | where_clause = " AND (%s)" % (where_clause,) | |
172 | ||
173 | overall_select_query_args.extend(where_args) | |
174 | ||
175 | overall_select_clause = f""" | |
176 | SELECT DISTINCT ON (type, state_key) | |
177 | type, state_key, event_id | |
178 | FROM state_groups_state | |
179 | WHERE state_group IN ( | |
180 | SELECT state_group FROM sgs | |
181 | ) {where_clause} | |
182 | ORDER BY type, state_key, state_group DESC | |
183 | """ | |
131 | 184 | |
132 | 185 | for group in groups: |
133 | 186 | args: List[Union[int, str]] = [group] |
134 | args.extend(where_args) | |
135 | ||
136 | txn.execute(sql % (where_clause,), args) | |
187 | args.extend(overall_select_query_args) | |
188 | ||
189 | txn.execute(sql % (overall_select_clause,), args) | |
137 | 190 | for row in txn: |
138 | 191 | typ, state_key, event_id = row |
139 | 192 | key = (intern_string(typ), intern_string(state_key)) |
140 | 193 | results[group][key] = event_id |
141 | 194 | else: |
142 | 195 | max_entries_returned = state_filter.max_entries_returned() |
196 | ||
197 | where_clause, where_args = state_filter.make_sql_filter_clause() | |
198 | # Unless the filter clause is empty, we're going to append it after an | |
199 | # existing where clause | |
200 | if where_clause: | |
201 | where_clause = " AND (%s)" % (where_clause,) | |
143 | 202 | |
144 | 203 | # We don't use WITH RECURSIVE on sqlite3 as there are distributions |
145 | 204 | # that ship with an sqlite3 version that doesn't support it (e.g. wheezy) |
0 | /* Copyright 2022 The Matrix.org Foundation C.I.C | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | -- Prior to this schema delta, we tracked the set of unconverted rows in | |
16 | -- `device_lists_changes_in_room` using the `converted_to_destinations` flag. When rows | |
17 | -- were converted to `device_lists_outbound_pokes`, the `converted_to_destinations` flag | |
18 | -- would be set. | |
19 | -- | |
20 | -- After this schema delta, the `converted_to_destinations` is still populated like | |
21 | -- before, but the set of unconverted rows is determined by the `stream_id` in the new | |
22 | -- `device_lists_changes_converted_stream_position` table. | |
23 | -- | |
24 | -- If rolled back, Synapse will re-send all device list changes that happened since the | |
25 | -- schema delta. | |
26 | ||
27 | CREATE TABLE IF NOT EXISTS device_lists_changes_converted_stream_position( | |
28 | Lock CHAR(1) NOT NULL DEFAULT 'X' UNIQUE, -- Makes sure this table only has one row. | |
29 | -- The (stream id, room id) of the last row in `device_lists_changes_in_room` that | |
30 | -- has been converted to `device_lists_outbound_pokes`. Rows with a strictly larger | |
31 | -- (stream id, room id) where `converted_to_destinations` is `FALSE` have not been | |
32 | -- converted. | |
33 | stream_id BIGINT NOT NULL, | |
34 | -- `room_id` may be an empty string, which compares less than all valid room IDs. | |
35 | room_id TEXT NOT NULL, | |
36 | CHECK (Lock='X') | |
37 | ); | |
38 | ||
39 | INSERT INTO device_lists_changes_converted_stream_position (stream_id, room_id) VALUES ( | |
40 | ( | |
41 | SELECT COALESCE( | |
42 | -- The last converted stream id is the smallest unconverted stream id minus | |
43 | -- one. | |
44 | MIN(stream_id) - 1, | |
45 | -- If there is no unconverted stream id, the last converted stream id is the | |
46 | -- largest stream id. | |
47 | -- Otherwise, pick 1, since stream ids start at 2. | |
48 | (SELECT COALESCE(MAX(stream_id), 1) FROM device_lists_changes_in_room) | |
49 | ) FROM device_lists_changes_in_room WHERE NOT converted_to_destinations | |
50 | ), | |
51 | '' | |
52 | ); |
0 | /* Copyright 2022 The Matrix.org Foundation C.I.C | |
1 | * | |
2 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
3 | * you may not use this file except in compliance with the License. | |
4 | * You may obtain a copy of the License at | |
5 | * | |
6 | * http://www.apache.org/licenses/LICENSE-2.0 | |
7 | * | |
8 | * Unless required by applicable law or agreed to in writing, software | |
9 | * distributed under the License is distributed on an "AS IS" BASIS, | |
10 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
11 | * See the License for the specific language governing permissions and | |
12 | * limitations under the License. | |
13 | */ | |
14 | ||
15 | ||
16 | -- Adds an index on `device_lists_changes_in_room (room_id, stream_id)`, which | |
17 | -- speeds up `/sync` queries. | |
18 | INSERT INTO background_updates (ordering, update_name, progress_json) VALUES | |
19 | (7313, 'device_lists_changes_in_room_by_room_index', '{}'); |
185 | 185 | column: str, |
186 | 186 | extra_tables: Iterable[Tuple[str, str]] = (), |
187 | 187 | step: int = 1, |
188 | is_writer: bool = True, | |
188 | 189 | ) -> None: |
189 | 190 | assert step != 0 |
190 | 191 | self._lock = threading.Lock() |
191 | 192 | self._step: int = step |
192 | 193 | self._current: int = _load_current_id(db_conn, table, column, step) |
194 | self._is_writer = is_writer | |
193 | 195 | for table, column in extra_tables: |
194 | 196 | self._current = (max if step > 0 else min)( |
195 | 197 | self._current, _load_current_id(db_conn, table, column, step) |
203 | 205 | self._unfinished_ids: OrderedDict[int, int] = OrderedDict() |
204 | 206 | |
205 | 207 | def advance(self, instance_name: str, new_id: int) -> None: |
206 | # `StreamIdGenerator` should only be used when there is a single writer, | |
207 | # so replication should never happen. | |
208 | raise Exception("Replication is not supported by StreamIdGenerator") | |
208 | # Advance should never be called on a writer instance, only over replication | |
209 | if self._is_writer: | |
210 | raise Exception("Replication is not supported by writer StreamIdGenerator") | |
211 | ||
212 | self._current = (max if self._step > 0 else min)(self._current, new_id) | |
209 | 213 | |
210 | 214 | def get_next(self) -> AsyncContextManager[int]: |
211 | 215 | with self._lock: |
248 | 252 | return _AsyncCtxManagerWrapper(manager()) |
249 | 253 | |
250 | 254 | def get_current_token(self) -> int: |
255 | if not self._is_writer: | |
256 | return self._current | |
257 | ||
251 | 258 | with self._lock: |
252 | 259 | if self._unfinished_ids: |
253 | 260 | return next(iter(self._unfinished_ids)) - self._step |
73 | 73 | return token |
74 | 74 | |
75 | 75 | @trace |
76 | async def get_start_token_for_pagination(self, room_id: str) -> StreamToken: | |
77 | """Get the start token for a given room to be used to paginate | |
78 | events. | |
79 | ||
80 | The returned token does not have the current values for fields other | |
81 | than `room`, since they are not used during pagination. | |
82 | ||
83 | Returns: | |
84 | The start token for pagination. | |
85 | """ | |
86 | return StreamToken.START | |
87 | ||
88 | @trace | |
76 | 89 | async def get_current_token_for_pagination(self, room_id: str) -> StreamToken: |
77 | 90 | """Get the current token for a given room to be used to paginate |
78 | 91 | events. |
142 | 142 | Requester. |
143 | 143 | |
144 | 144 | Args: |
145 | store (DataStore): Used to convert AS ID to AS object | |
146 | input (dict): A dict produced by `serialize` | |
145 | store: Used to convert AS ID to AS object | |
146 | input: A dict produced by `serialize` | |
147 | 147 | |
148 | 148 | Returns: |
149 | 149 | Requester |
216 | 216 | limit: Maximum number of concurrent executions.
217 | 217 | |
218 | 218 | Returns: |
219 | Deferred: Resolved when all function invocations have finished. | |
219 | None, when all function invocations have finished. The return values | |
220 | from those functions are discarded. | |
220 | 221 | """ |
221 | 222 | it = iter(args) |
222 | 223 |
196 | 196 | resize_callback: A function which can be called to resize the cache. |
197 | 197 | |
198 | 198 | Returns: |
199 | CacheMetric: an object which provides inc_{hits,misses,evictions} methods | |
199 | an object which provides inc_{hits,misses,evictions} methods | |
200 | 200 | """ |
201 | 201 | if resizable: |
202 | 202 | if not resize_callback: |
152 | 152 | Args: |
153 | 153 | key: |
154 | 154 | callback: Gets called when the entry in the cache is invalidated |
155 | update_metrics (bool): whether to update the cache hit rate metrics | |
155 | update_metrics: whether to update the cache hit rate metrics | |
156 | 156 | |
157 | 157 | Returns: |
158 | 158 | A Deferred which completes with the result. Note that this may later fail |
502 | 502 | is specified as a list that is iterated through to lookup keys in the |
503 | 503 | original cache. A new tuple consisting of the (deduplicated) keys that weren't in |
504 | 504 | the cache gets passed to the original function, which is expected to result
505 | in a map of key to value for each passed value. THe new results are stored in the | |
505 | in a map of key to value for each passed value. The new results are stored in the | |
506 | 506 | original cache. Note that any missing values are cached as None. |
507 | 507 | |
508 | 508 | Args: |
168 | 168 | if it is in the cache. |
169 | 169 | |
170 | 170 | Returns: |
171 | DictionaryEntry: If `dict_keys` is not None then `DictionaryEntry` | |
172 | will contain include the keys that are in the cache. If None then | |
173 | will either return the full dict if in the cache, or the empty | |
174 | dict (with `full` set to False) if it isn't. | |
171 | If `dict_keys` is not None then `DictionaryEntry` will include | |
172 | the keys that are in the cache. | |
173 | ||
174 | If None then will either return the full dict if in the cache, or the | |
175 | empty dict (with `full` set to False) if it isn't. | |
175 | 176 | """ |
176 | 177 | if dict_keys is None: |
177 | 178 | # The caller wants the full set of dictionary keys for this cache key |
206 | 206 | items from the cache. |
207 | 207 | |
208 | 208 | Returns: |
209 | bool: Whether the cache changed size or not. | |
209 | Whether the cache changed size or not. | |
210 | 210 | """ |
211 | 211 | new_size = int(self._original_max_size * factor) |
212 | 212 | if new_size != self._max_size: |
388 | 388 | cache_name: The name of this cache, for the prometheus metrics. If unset, |
389 | 389 | no metrics will be reported on this cache. |
390 | 390 | |
391 | cache_type (type): | |
391 | cache_type: | |
392 | 392 | type of underlying cache to be used. Typically one of dict |
393 | 393 | or TreeCache. |
394 | 394 | |
395 | size_callback (func(V) -> int | None): | |
395 | size_callback: | |
396 | 396 | |
397 | 397 | metrics_collection_callback: |
398 | 398 | metrics collection callback. This is called early in the metrics |
402 | 402 | |
403 | 403 | Ignored if cache_name is None. |
404 | 404 | |
405 | apply_cache_factor_from_config (bool): If true, `max_size` will be | |
405 | apply_cache_factor_from_config: If true, `max_size` will be | |
406 | 406 | multiplied by a cache factor derived from the homeserver config |
407 | 407 | |
408 | 408 | clock: |
795 | 795 | items from the cache. |
796 | 796 | |
797 | 797 | Returns: |
798 | bool: Whether the cache changed size or not. | |
798 | Whether the cache changed size or not. | |
799 | 799 | """ |
800 | 800 | if not self.apply_cache_factor_from_config: |
801 | 801 | return False |
182 | 182 | # Handle request ... |
183 | 183 | |
184 | 184 | Args: |
185 | host (str): Origin of incoming request. | |
185 | host: Origin of incoming request. | |
186 | 186 | |
187 | 187 | Returns: |
188 | 188 | context manager which returns a deferred. |
47 | 47 | registration: whether we want to bind the 3PID as part of registering a new user. |
48 | 48 | |
49 | 49 | Returns: |
50 | bool: whether the 3PID medium/address is allowed to be added to this HS | |
50 | whether the 3PID medium/address is allowed to be added to this HS | |
51 | 51 | """ |
52 | 52 | if not await hs.get_password_auth_provider().is_3pid_allowed( |
53 | 53 | medium, address, registration |
89 | 89 | """Fetch any objects that have timed out |
90 | 90 | |
91 | 91 | Args: |
92 | now (ms): Current time in msec | |
92 | now: Current time in msec | |
93 | 93 | |
94 | 94 | Returns: |
95 | list: List of objects that have timed out | |
95 | List of objects that have timed out | |
96 | 96 | """ |
97 | 97 | now_key = int(now / self.bucket_size) |
98 | 98 |
562 | 562 | |
563 | 563 | async def filter_events_for_server( |
564 | 564 | storage: StorageControllers, |
565 | server_name: str, | |
565 | target_server_name: str, | |
566 | local_server_name: str, | |
566 | 567 | events: List[EventBase], |
567 | 568 | redact: bool = True, |
568 | 569 | check_history_visibility_only: bool = False, |
602 | 603 | # if the server is either in the room or has been invited |
603 | 604 | # into the room. |
604 | 605 | for ev in memberships.values(): |
605 | assert get_domain_from_id(ev.state_key) == server_name | |
606 | assert get_domain_from_id(ev.state_key) == target_server_name | |
606 | 607 | |
607 | 608 | memtype = ev.membership |
608 | 609 | if memtype == Membership.JOIN: |
620 | 621 | # We don't want to check whether users are erased, which is equivalent |
621 | 622 | # to no users having been erased. |
622 | 623 | erased_senders = {} |
624 | ||
625 | # Filter out non-local events when we are in the middle of a partial join, since our servers | |
626 | # list can be out of date and we could leak events to servers not in the room anymore. | |
627 | # This can also be true for local events but we consider it to be an acceptable risk. | |
628 | ||
629 | # We do this check as a first step and before retrieving membership events because | |
630 | # otherwise a room could be fully joined after we retrieve those, which would then bypass | |
631 | # this check but would base the filtering on an outdated view of the membership events. | |
632 | ||
633 | partial_state_invisible_events = set() | |
634 | if not check_history_visibility_only: | |
635 | for e in events: | |
636 | sender_domain = get_domain_from_id(e.sender) | |
637 | if ( | |
638 | sender_domain != local_server_name | |
639 | and await storage.main.is_partial_state_room(e.room_id) | |
640 | ): | |
641 | partial_state_invisible_events.add(e) | |
623 | 642 | |
624 | 643 | # Let's check to see if all the events have a history visibility |
625 | 644 | # of "shared" or "world_readable". If that's the case then we don't |
635 | 654 | if event_to_history_vis[e.event_id] |
636 | 655 | not in (HistoryVisibility.SHARED, HistoryVisibility.WORLD_READABLE) |
637 | 656 | ], |
638 | server_name, | |
657 | target_server_name, | |
639 | 658 | ) |
640 | 659 | |
641 | 660 | to_return = [] |
644 | 663 | visible = check_event_is_visible( |
645 | 664 | event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {}) |
646 | 665 | ) |
666 | ||
667 | if e in partial_state_invisible_events: | |
668 | visible = False | |
669 | ||
647 | 670 | if visible and not erased: |
648 | 671 | to_return.append(e) |
649 | 672 | elif redact: |
68 | 68 | events=events, |
69 | 69 | ephemeral=[], |
70 | 70 | to_device_messages=[], # txn made and saved |
71 | one_time_key_counts={}, | |
71 | one_time_keys_count={}, | |
72 | 72 | unused_fallback_keys={}, |
73 | 73 | device_list_summary=DeviceListUpdates(), |
74 | 74 | ) |
95 | 95 | events=events, |
96 | 96 | ephemeral=[], |
97 | 97 | to_device_messages=[], # txn made and saved |
98 | one_time_key_counts={}, | |
98 | one_time_keys_count={}, | |
99 | 99 | unused_fallback_keys={}, |
100 | 100 | device_list_summary=DeviceListUpdates(), |
101 | 101 | ) |
124 | 124 | events=events, |
125 | 125 | ephemeral=[], |
126 | 126 | to_device_messages=[], |
127 | one_time_key_counts={}, | |
127 | one_time_keys_count={}, | |
128 | 128 | unused_fallback_keys={}, |
129 | 129 | device_list_summary=DeviceListUpdates(), |
130 | 130 | ) |
468 | 468 | keys = self.get_success(fetcher.get_keys(SERVER_NAME, ["key1"], 0)) |
469 | 469 | self.assertEqual(keys, {}) |
470 | 470 | |
471 | def test_keyid_containing_forward_slash(self) -> None: | |
472 | """We should url-encode any url unsafe chars in key ids. | |
473 | ||
474 | Detects https://github.com/matrix-org/synapse/issues/14488. | |
475 | """ | |
476 | fetcher = ServerKeyFetcher(self.hs) | |
477 | self.get_success(fetcher.get_keys("example.com", ["key/potato"], 0)) | |
478 | ||
479 | self.http_client.get_json.assert_called_once() | |
480 | args, kwargs = self.http_client.get_json.call_args | |
481 | self.assertEqual(kwargs["path"], "/_matrix/key/v2/server/key%2Fpotato") | |
482 | ||
471 | 483 | |
472 | 484 | class PerspectivesKeyFetcherTestCase(unittest.HomeserverTestCase): |
473 | 485 | def make_homeserver(self, reactor, clock): |
83 | 83 | ) |
84 | 84 | |
85 | 85 | @override_config({"send_federation": True}) |
86 | def test_send_receipts_thread(self): | |
87 | mock_send_transaction = ( | |
88 | self.hs.get_federation_transport_client().send_transaction | |
89 | ) | |
90 | mock_send_transaction.return_value = make_awaitable({}) | |
91 | ||
92 | # Create receipts for: | |
93 | # | |
94 | # * The same room / user on multiple threads. | |
95 | # * A different user in the same room. | |
96 | sender = self.hs.get_federation_sender() | |
97 | for user, thread in ( | |
98 | ("alice", None), | |
99 | ("alice", "thread"), | |
100 | ("bob", None), | |
101 | ("bob", "diff-thread"), | |
102 | ): | |
103 | receipt = ReadReceipt( | |
104 | "room_id", | |
105 | "m.read", | |
106 | user, | |
107 | ["event_id"], | |
108 | thread_id=thread, | |
109 | data={"ts": 1234}, | |
110 | ) | |
111 | self.successResultOf( | |
112 | defer.ensureDeferred(sender.send_read_receipt(receipt)) | |
113 | ) | |
114 | ||
115 | self.pump() | |
116 | ||
117 | # expect a call to send_transaction with two EDUs to separate threads. | |
118 | mock_send_transaction.assert_called_once() | |
119 | json_cb = mock_send_transaction.call_args[0][1] | |
120 | data = json_cb() | |
121 | # Note that the ordering of the EDUs doesn't matter. | |
122 | self.assertCountEqual( | |
123 | data["edus"], | |
124 | [ | |
125 | { | |
126 | "edu_type": EduTypes.RECEIPT, | |
127 | "content": { | |
128 | "room_id": { | |
129 | "m.read": { | |
130 | "alice": { | |
131 | "event_ids": ["event_id"], | |
132 | "data": {"ts": 1234, "thread_id": "thread"}, | |
133 | }, | |
134 | "bob": { | |
135 | "event_ids": ["event_id"], | |
136 | "data": {"ts": 1234, "thread_id": "diff-thread"}, | |
137 | }, | |
138 | } | |
139 | } | |
140 | }, | |
141 | }, | |
142 | { | |
143 | "edu_type": EduTypes.RECEIPT, | |
144 | "content": { | |
145 | "room_id": { | |
146 | "m.read": { | |
147 | "alice": { | |
148 | "event_ids": ["event_id"], | |
149 | "data": {"ts": 1234}, | |
150 | }, | |
151 | "bob": { | |
152 | "event_ids": ["event_id"], | |
153 | "data": {"ts": 1234}, | |
154 | }, | |
155 | } | |
156 | } | |
157 | }, | |
158 | }, | |
159 | ], | |
160 | ) | |
161 | ||
162 | @override_config({"send_federation": True}) | |
86 | 163 | def test_send_receipts_with_backoff(self): |
87 | 164 | """Send two receipts in quick succession; the second should be flushed, but |
88 | 165 | only after 20ms""" |
24 | 24 | from synapse.api.constants import EduTypes, EventTypes |
25 | 25 | from synapse.appservice import ( |
26 | 26 | ApplicationService, |
27 | TransactionOneTimeKeyCounts, | |
27 | TransactionOneTimeKeysCount, | |
28 | 28 | TransactionUnusedFallbackKeys, |
29 | 29 | ) |
30 | 30 | from synapse.handlers.appservice import ApplicationServicesHandler |
1122 | 1122 | # Capture what was sent as an AS transaction. |
1123 | 1123 | self.send_mock.assert_called() |
1124 | 1124 | last_args, _last_kwargs = self.send_mock.call_args |
1125 | otks: Optional[TransactionOneTimeKeyCounts] = last_args[self.ARG_OTK_COUNTS] | |
1125 | otks: Optional[TransactionOneTimeKeysCount] = last_args[self.ARG_OTK_COUNTS] | |
1126 | 1126 | unused_fallbacks: Optional[TransactionUnusedFallbackKeys] = last_args[ |
1127 | 1127 | self.ARG_FALLBACK_KEYS |
1128 | 1128 | ] |
18 | 18 | from twisted.test.proto_helpers import MemoryReactor |
19 | 19 | |
20 | 20 | from synapse.api.errors import NotFoundError, SynapseError |
21 | from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN | |
21 | from synapse.handlers.device import MAX_DEVICE_DISPLAY_NAME_LEN, DeviceHandler | |
22 | 22 | from synapse.server import HomeServer |
23 | 23 | from synapse.util import Clock |
24 | 24 | |
31 | 31 | class DeviceTestCase(unittest.HomeserverTestCase): |
32 | 32 | def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: |
33 | 33 | hs = self.setup_test_homeserver("server", federation_http_client=None) |
34 | self.handler = hs.get_device_handler() | |
34 | handler = hs.get_device_handler() | |
35 | assert isinstance(handler, DeviceHandler) | |
36 | self.handler = handler | |
35 | 37 | self.store = hs.get_datastores().main |
36 | 38 | return hs |
37 | 39 | |
60 | 62 | self.assertEqual(res, "fco") |
61 | 63 | |
62 | 64 | dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) |
65 | assert dev is not None | |
63 | 66 | self.assertEqual(dev["display_name"], "display name") |
64 | 67 | |
65 | 68 | def test_device_is_preserved_if_exists(self) -> None: |
82 | 85 | self.assertEqual(res2, "fco") |
83 | 86 | |
84 | 87 | dev = self.get_success(self.handler.store.get_device("@boris:foo", "fco")) |
88 | assert dev is not None | |
85 | 89 | self.assertEqual(dev["display_name"], "display name") |
86 | 90 | |
87 | 91 | def test_device_id_is_made_up_if_unspecified(self) -> None: |
94 | 98 | ) |
95 | 99 | |
96 | 100 | dev = self.get_success(self.handler.store.get_device("@theresa:foo", device_id)) |
101 | assert dev is not None | |
97 | 102 | self.assertEqual(dev["display_name"], "display") |
98 | 103 | |
99 | 104 | def test_get_devices_by_user(self) -> None: |
263 | 268 | class DehydrationTestCase(unittest.HomeserverTestCase): |
264 | 269 | def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: |
265 | 270 | hs = self.setup_test_homeserver("server", federation_http_client=None) |
266 | self.handler = hs.get_device_handler() | |
271 | handler = hs.get_device_handler() | |
272 | assert isinstance(handler, DeviceHandler) | |
273 | self.handler = handler | |
267 | 274 | self.registration = hs.get_registration_handler() |
268 | 275 | self.auth = hs.get_auth() |
269 | 276 | self.store = hs.get_datastores().main |
283 | 290 | ) |
284 | 291 | ) |
285 | 292 | |
286 | retrieved_device_id, device_data = self.get_success( | |
287 | self.handler.get_dehydrated_device(user_id=user_id) | |
288 | ) | |
293 | result = self.get_success(self.handler.get_dehydrated_device(user_id=user_id)) | |
294 | assert result is not None | |
295 | retrieved_device_id, device_data = result | |
289 | 296 | |
290 | 297 | self.assertEqual(retrieved_device_id, stored_dehydrated_device_id) |
291 | 298 | self.assertEqual(device_data, {"device_data": {"foo": "bar"}}) |
14 | 14 | from typing import Optional |
15 | 15 | from unittest.mock import Mock, call |
16 | 16 | |
17 | from parameterized import parameterized | |
17 | 18 | from signedjson.key import generate_signing_key |
18 | 19 | |
19 | 20 | from synapse.api.constants import EventTypes, Membership, PresenceState |
36 | 37 | from synapse.types import UserID, get_domain_from_id |
37 | 38 | |
38 | 39 | from tests import unittest |
40 | from tests.replication._base import BaseMultiWorkerStreamTestCase | |
39 | 41 | |
40 | 42 | |
41 | 43 | class PresenceUpdateTestCase(unittest.HomeserverTestCase): |
504 | 506 | self.assertEqual(state, new_state) |
505 | 507 | |
506 | 508 | |
507 | class PresenceHandlerTestCase(unittest.HomeserverTestCase): | |
509 | class PresenceHandlerTestCase(BaseMultiWorkerStreamTestCase): | |
508 | 510 | def prepare(self, reactor, clock, hs): |
509 | 511 | self.presence_handler = hs.get_presence_handler() |
510 | 512 | self.clock = hs.get_clock() |
715 | 717 | # our status message should be the same as it was before |
716 | 718 | self.assertEqual(state.status_msg, status_msg) |
717 | 719 | |
718 | def test_set_presence_from_syncing_keeps_busy(self): | |
719 | """Test that presence set by syncing doesn't affect busy status""" | |
720 | # while this isn't the default | |
721 | self.presence_handler._busy_presence_enabled = True | |
722 | ||
720 | @parameterized.expand([(False,), (True,)]) | |
721 | @unittest.override_config( | |
722 | { | |
723 | "experimental_features": { | |
724 | "msc3026_enabled": True, | |
725 | }, | |
726 | } | |
727 | ) | |
728 | def test_set_presence_from_syncing_keeps_busy(self, test_with_workers: bool): | |
729 | """Test that presence set by syncing doesn't affect busy status | |
730 | ||
731 | Args: | |
732 | test_with_workers: If True, check the presence state of the user by calling | |
733 | /sync against a worker, rather than the main process. | |
734 | """ | |
723 | 735 | user_id = "@test:server" |
724 | 736 | status_msg = "I'm busy!" |
725 | 737 | |
738 | # By default, we call /sync against the main process. | |
739 | worker_to_sync_against = self.hs | |
740 | if test_with_workers: | |
741 | # Create a worker and use it to handle /sync traffic instead. | |
742 | # This is used to test that presence changes get replicated from workers | |
743 | # to the main process correctly. | |
744 | worker_to_sync_against = self.make_worker_hs( | |
745 | "synapse.app.generic_worker", {"worker_name": "presence_writer"} | |
746 | ) | |
747 | ||
748 | # Set presence to BUSY | |
726 | 749 | self._set_presencestate_with_status_msg(user_id, PresenceState.BUSY, status_msg) |
727 | 750 | |
751 | # Perform a sync with a presence state other than busy. This should NOT change | |
752 | # our presence status; we only change from busy if we explicitly set it via | |
753 | # /presence/*. | |
728 | 754 | self.get_success( |
729 | self.presence_handler.user_syncing(user_id, True, PresenceState.ONLINE) | |
730 | ) | |
731 | ||
755 | worker_to_sync_against.get_presence_handler().user_syncing( | |
756 | user_id, True, PresenceState.ONLINE | |
757 | ) | |
758 | ) | |
759 | ||
760 | # Check against the main process that the user's presence did not change. | |
732 | 761 | state = self.get_success( |
733 | 762 | self.presence_handler.get_state(UserID.from_string(user_id)) |
734 | 763 | ) |
0 | # Licensed under the Apache License, Version 2.0 (the "License"); | |
1 | # you may not use this file except in compliance with the License. | |
2 | # You may obtain a copy of the License at | |
3 | # | |
4 | # http://www.apache.org/licenses/LICENSE-2.0 | |
5 | # | |
6 | # Unless required by applicable law or agreed to in writing, software | |
7 | # distributed under the License is distributed on an "AS IS" BASIS, | |
8 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
9 | # See the License for the specific language governing permissions and | |
10 | # limitations under the License. | |
11 | from http import HTTPStatus | |
12 | from typing import BinaryIO, Callable, Dict, List, Optional, Tuple | |
13 | from unittest.mock import Mock | |
14 | ||
15 | from twisted.test.proto_helpers import MemoryReactor | |
16 | from twisted.web.http_headers import Headers | |
17 | ||
18 | from synapse.api.errors import Codes, SynapseError | |
19 | from synapse.http.client import RawHeaders | |
20 | from synapse.server import HomeServer | |
21 | from synapse.util import Clock | |
22 | ||
23 | from tests import unittest | |
24 | from tests.test_utils import SMALL_PNG, FakeResponse | |
25 | ||
26 | ||
27 | class TestSSOHandler(unittest.HomeserverTestCase): | |
28 | def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: | |
29 | self.http_client = Mock(spec=["get_file"]) | |
30 | self.http_client.get_file.side_effect = mock_get_file | |
31 | self.http_client.user_agent = b"Synapse Test" | |
32 | hs = self.setup_test_homeserver( | |
33 | proxied_blacklisted_http_client=self.http_client | |
34 | ) | |
35 | return hs | |
36 | ||
37 | async def test_set_avatar(self) -> None: | |
38 | """Tests successfully setting the avatar of a newly created user""" | |
39 | handler = self.hs.get_sso_handler() | |
40 | ||
41 | # Create a new user to set avatar for | |
42 | reg_handler = self.hs.get_registration_handler() | |
43 | user_id = self.get_success(reg_handler.register_user(approved=True)) | |
44 | ||
45 | self.assertTrue( | |
46 | self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) | |
47 | ) | |
48 | ||
49 | # Ensure avatar is set on this newly created user, | |
50 | # so no need to compare for the exact image | |
51 | profile_handler = self.hs.get_profile_handler() | |
52 | profile = self.get_success(profile_handler.get_profile(user_id)) | |
53 | self.assertIsNot(profile["avatar_url"], None) | |
54 | ||
55 | @unittest.override_config({"max_avatar_size": 1}) | |
56 | async def test_set_avatar_too_big_image(self) -> None: | |
57 | """Tests that saving an avatar fails when it is too big""" | |
58 | handler = self.hs.get_sso_handler() | |
59 | ||
60 | # any random user works since image check is supposed to fail | |
61 | user_id = "@sso-user:test" | |
62 | ||
63 | self.assertFalse( | |
64 | self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) | |
65 | ) | |
66 | ||
67 | @unittest.override_config({"allowed_avatar_mimetypes": ["image/jpeg"]}) | |
68 | async def test_set_avatar_incorrect_mime_type(self) -> None: | |
69 | """Tests that saving an avatar fails when its mime type is not allowed""" | |
70 | handler = self.hs.get_sso_handler() | |
71 | ||
72 | # any random user works since image check is supposed to fail | |
73 | user_id = "@sso-user:test" | |
74 | ||
75 | self.assertFalse( | |
76 | self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) | |
77 | ) | |
78 | ||
79 | async def test_skip_saving_avatar_when_not_changed(self) -> None: | |
80 | """Tests whether saving of avatar correctly skips if the avatar hasn't | |
81 | changed""" | |
82 | handler = self.hs.get_sso_handler() | |
83 | ||
84 | # Create a new user to set avatar for | |
85 | reg_handler = self.hs.get_registration_handler() | |
86 | user_id = self.get_success(reg_handler.register_user(approved=True)) | |
87 | ||
88 | # set avatar for the first time, should be a success | |
89 | self.assertTrue( | |
90 | self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) | |
91 | ) | |
92 | ||
93 | # get avatar picture for comparison after another attempt | |
94 | profile_handler = self.hs.get_profile_handler() | |
95 | profile = self.get_success(profile_handler.get_profile(user_id)) | |
96 | url_to_match = profile["avatar_url"] | |
97 | ||
98 | # set same avatar for the second time, should be a success | |
99 | self.assertTrue( | |
100 | self.get_success(handler.set_avatar(user_id, "http://my.server/me.png")) | |
101 | ) | |
102 | ||
103 | # compare avatar picture's url from previous step | |
104 | profile = self.get_success(profile_handler.get_profile(user_id)) | |
105 | self.assertEqual(profile["avatar_url"], url_to_match) | |
106 | ||
107 | ||
108 | async def mock_get_file( | |
109 | url: str, | |
110 | output_stream: BinaryIO, | |
111 | max_size: Optional[int] = None, | |
112 | headers: Optional[RawHeaders] = None, | |
113 | is_allowed_content_type: Optional[Callable[[str], bool]] = None, | |
114 | ) -> Tuple[int, Dict[bytes, List[bytes]], str, int]: | |
115 | ||
116 | fake_response = FakeResponse(code=404) | |
117 | if url == "http://my.server/me.png": | |
118 | fake_response = FakeResponse( | |
119 | code=200, | |
120 | headers=Headers( | |
121 | {"Content-Type": ["image/png"], "Content-Length": [str(len(SMALL_PNG))]} | |
122 | ), | |
123 | body=SMALL_PNG, | |
124 | ) | |
125 | ||
126 | if max_size is not None and max_size < len(SMALL_PNG): | |
127 | raise SynapseError( | |
128 | HTTPStatus.BAD_GATEWAY, | |
129 | "Requested file is too large > %r bytes" % (max_size,), | |
130 | Codes.TOO_LARGE, | |
131 | ) | |
132 | ||
133 | if is_allowed_content_type and not is_allowed_content_type("image/png"): | |
134 | raise SynapseError( | |
135 | HTTPStatus.BAD_GATEWAY, | |
136 | ( | |
137 | "Requested file's content type not allowed for this operation: %s" | |
138 | % "image/png" | |
139 | ), | |
140 | ) | |
141 | ||
142 | output_stream.write(fake_response.body) | |
143 | ||
144 | return len(SMALL_PNG), {b"Content-Type": [b"image/png"]}, "", 200 |
12 | 12 | # limitations under the License. |
13 | 13 | import os.path |
14 | 14 | import subprocess |
15 | from typing import List | |
15 | 16 | |
16 | 17 | from zope.interface import implementer |
17 | 18 | |
69 | 70 | """ |
70 | 71 | |
71 | 72 | |
72 | def create_test_cert_file(sanlist): | |
73 | def create_test_cert_file(sanlist: List[bytes]) -> str: | |
73 | 74 | """build an x509 certificate file |
74 | 75 | |
75 | 76 | Args: |
76 | sanlist: list[bytes]: a list of subjectAltName values for the cert | |
77 | sanlist: a list of subjectAltName values for the cert | |
77 | 78 | |
78 | 79 | Returns: |
79 | str: the path to the file | |
80 | The path to the file | |
80 | 81 | """ |
81 | 82 | global cert_file_count |
82 | 83 | csr_filename = "server.csr" |
777 | 777 | worker process. The test users will still sync with the main process. The purpose of testing |
778 | 778 | with a worker is to check whether a Synapse module running on a worker can inform other workers/ |
779 | 779 | the main process that they should include additional presence when a user next syncs. |
780 | If this argument is True, `test_case` MUST be an instance of BaseMultiWorkerStreamTestCase. | |
780 | 781 | """ |
781 | 782 | if test_with_workers: |
783 | assert isinstance(test_case, BaseMultiWorkerStreamTestCase) | |
784 | ||
782 | 785 | # Create a worker process to make module_api calls against |
783 | 786 | worker_hs = test_case.make_worker_hs( |
784 | 787 | "synapse.app.generic_worker", {"worker_name": "presence_writer"} |
60 | 60 | sender_power_level, |
61 | 61 | power_levels.get("notifications", {}), |
62 | 62 | {} if related_events is None else related_events, |
63 | True, | |
64 | event.room_version.msc3931_push_features, | |
63 | 65 | True, |
64 | 66 | ) |
65 | 67 |
541 | 541 | self.send("OK") |
542 | 542 | elif command == b"GET": |
543 | 543 | self.send(None) |
544 | ||
545 | # Connection keep-alives. | |
546 | elif command == b"PING": | |
547 | self.send("PONG") | |
548 | ||
544 | 549 | else: |
545 | raise Exception("Unknown command") | |
550 | raise Exception(f"Unknown command: {command}") | |
546 | 551 | |
547 | 552 | def send(self, msg): |
548 | 553 | """Send a message back to the client.""" |
142 | 142 | self.persist(type="m.room.create", key="", creator=USER_ID) |
143 | 143 | self.check("get_invited_rooms_for_local_user", [USER_ID_2], []) |
144 | 144 | event = self.persist(type="m.room.member", key=USER_ID_2, membership="invite") |
145 | assert event.internal_metadata.stream_ordering is not None | |
145 | 146 | |
146 | 147 | self.replicate() |
147 | 148 | |
229 | 230 | j2 = self.persist( |
230 | 231 | type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join" |
231 | 232 | ) |
233 | assert j2.internal_metadata.stream_ordering is not None | |
232 | 234 | self.replicate() |
233 | 235 | |
234 | 236 | expected_pos = PersistedEventPosition( |
286 | 288 | ) |
287 | 289 | ) |
288 | 290 | self.replicate() |
291 | assert j2.internal_metadata.stream_ordering is not None | |
289 | 292 | |
290 | 293 | event_source = RoomEventSource(self.hs) |
291 | 294 | event_source.store = self.slaved_store |
335 | 338 | |
336 | 339 | event_id = 0 |
337 | 340 | |
338 | def persist(self, backfill=False, **kwargs): | |
341 | def persist(self, backfill=False, **kwargs) -> FrozenEvent: | |
339 | 342 | """ |
340 | 343 | Returns: |
341 | synapse.events.FrozenEvent: The event that was persisted. | |
344 | The event that was persisted. | |
342 | 345 | """ |
343 | 346 | event, context = self.build_event(**kwargs) |
344 | 347 |
14 | 14 | import os |
15 | 15 | from typing import Optional, Tuple |
16 | 16 | |
17 | from twisted.internet.interfaces import IOpenSSLServerConnectionCreator | |
17 | 18 | from twisted.internet.protocol import Factory |
18 | from twisted.protocols.tls import TLSMemoryBIOFactory | |
19 | from twisted.protocols.tls import TLSMemoryBIOFactory, TLSMemoryBIOProtocol | |
19 | 20 | from twisted.web.http import HTTPChannel |
20 | 21 | from twisted.web.server import Request |
21 | 22 | |
101 | 102 | ) |
102 | 103 | |
103 | 104 | # fish the test server back out of the server-side TLS protocol. |
104 | http_server = server_tls_protocol.wrappedProtocol | |
105 | http_server: HTTPChannel = server_tls_protocol.wrappedProtocol # type: ignore[assignment] | |
105 | 106 | |
106 | 107 | # give the reactor a pump to get the TLS juices flowing. |
107 | 108 | self.reactor.pump((0.1,)) |
237 | 238 | return test_server_connection_factory |
238 | 239 | |
239 | 240 | |
240 | def _build_test_server(connection_creator): | |
241 | def _build_test_server( | |
242 | connection_creator: IOpenSSLServerConnectionCreator, | |
243 | ) -> TLSMemoryBIOProtocol: | |
241 | 244 | """Construct a test server |
242 | 245 | |
243 | 246 | This builds an HTTP channel, wrapped with a TLSMemoryBIOProtocol |
244 | 247 | |
245 | 248 | Args: |
246 | connection_creator (IOpenSSLServerConnectionCreator): thing to build | |
247 | SSL connections | |
248 | sanlist (list[bytes]): list of the SAN entries for the cert returned | |
249 | by the server | |
249 | connection_creator: thing to build SSL connections | |
250 | 250 | |
251 | 251 | Returns: |
252 | 252 | TLSMemoryBIOProtocol |
18 | 18 | |
19 | 19 | import synapse.rest.admin |
20 | 20 | from synapse.api.errors import Codes |
21 | from synapse.handlers.device import DeviceHandler | |
21 | 22 | from synapse.rest.client import login |
22 | 23 | from synapse.server import HomeServer |
23 | 24 | from synapse.util import Clock |
33 | 34 | ] |
34 | 35 | |
35 | 36 | def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: |
36 | self.handler = hs.get_device_handler() | |
37 | handler = hs.get_device_handler() | |
38 | assert isinstance(handler, DeviceHandler) | |
39 | self.handler = handler | |
37 | 40 | |
38 | 41 | self.admin_user = self.register_user("admin", "pass", admin=True) |
39 | 42 | self.admin_user_tok = self.login("admin", "pass") |
1855 | 1855 | self.assertEqual(token, channel.json_body["start"]) |
1856 | 1856 | self.assertIn("chunk", channel.json_body) |
1857 | 1857 | self.assertIn("end", channel.json_body) |
1858 | ||
1859 | def test_room_messages_backward(self) -> None: | |
1860 | """Test room messages can be retrieved by an admin that isn't in the room.""" | |
1861 | latest_event_id = self.helper.send( | |
1862 | self.room_id, body="message 1", tok=self.user_tok | |
1863 | )["event_id"] | |
1864 | ||
1865 | # Check that we get the first and second message when querying /messages. | |
1866 | channel = self.make_request( | |
1867 | "GET", | |
1868 | "/_synapse/admin/v1/rooms/%s/messages?dir=b" % (self.room_id,), | |
1869 | access_token=self.admin_user_tok, | |
1870 | ) | |
1871 | self.assertEqual(channel.code, 200, channel.json_body) | |
1872 | ||
1873 | chunk = channel.json_body["chunk"] | |
1874 | self.assertEqual(len(chunk), 6, [event["content"] for event in chunk]) | |
1875 | ||
1876 | # in backwards, this is the first event | |
1877 | self.assertEqual(chunk[0]["event_id"], latest_event_id) | |
1878 | ||
1879 | def test_room_messages_forward(self) -> None: | |
1880 | """Test room messages can be retrieved by an admin that isn't in the room.""" | |
1881 | latest_event_id = self.helper.send( | |
1882 | self.room_id, body="message 1", tok=self.user_tok | |
1883 | )["event_id"] | |
1884 | ||
1885 | # Check that we get the first and second message when querying /messages. | |
1886 | channel = self.make_request( | |
1887 | "GET", | |
1888 | "/_synapse/admin/v1/rooms/%s/messages?dir=f" % (self.room_id,), | |
1889 | access_token=self.admin_user_tok, | |
1890 | ) | |
1891 | self.assertEqual(channel.code, 200, channel.json_body) | |
1892 | ||
1893 | chunk = channel.json_body["chunk"] | |
1894 | self.assertEqual(len(chunk), 6, [event["content"] for event in chunk]) | |
1895 | ||
1896 | # in forward, this is the last event | |
1897 | self.assertEqual(chunk[5]["event_id"], latest_event_id) | |
1858 | 1898 | |
1859 | 1899 | def test_room_messages_purge(self) -> None: |
1860 | 1900 | """Test room messages can be retrieved by an admin that isn't in the room.""" |
1107 | 1107 | |
1108 | 1108 | # The "user" sent the root event and is making queries for the bundled |
1109 | 1109 | # aggregations: they have participated. |
1110 | self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 9) | |
1110 | self._test_bundled_aggregations(RelationTypes.THREAD, _gen_assert(True), 7) | |
1111 | 1111 | # The "user2" sent replies in the thread and is making queries for the |
1112 | 1112 | # bundled aggregations: they have participated. |
1113 | 1113 | # |
1169 | 1169 | bundled_aggregations["latest_event"].get("unsigned"), |
1170 | 1170 | ) |
1171 | 1171 | |
1172 | self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 9) | |
1172 | self._test_bundled_aggregations(RelationTypes.THREAD, assert_thread, 7) | |
1173 | 1173 | |
1174 | 1174 | def test_nested_thread(self) -> None: |
1175 | 1175 | """ |
3545 | 3545 | login.register_servlets, |
3546 | 3546 | ] |
3547 | 3547 | |
3548 | def default_config(self) -> JsonDict: | |
3549 | config = super().default_config() | |
3550 | config["experimental_features"] = {"msc3030_enabled": True} | |
3551 | return config | |
3552 | ||
3553 | 3548 | def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: |
3554 | 3549 | self._storage_controllers = self.hs.get_storage_controllers() |
3555 | 3550 | |
3591 | 3586 | |
3592 | 3587 | channel = self.make_request( |
3593 | 3588 | "GET", |
3594 | f"/_matrix/client/unstable/org.matrix.msc3030/rooms/{room_id}/timestamp_to_event?dir=b&ts={outlier_event.origin_server_ts}", | |
3589 | f"/_matrix/client/v1/rooms/{room_id}/timestamp_to_event?dir=b&ts={outlier_event.origin_server_ts}", | |
3595 | 3590 | access_token=self.room_owner_tok, |
3596 | 3591 | ) |
3597 | 3592 | self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) |
10 | 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
11 | 11 | # See the License for the specific language governing permissions and |
12 | 12 | # limitations under the License. |
13 | from typing import Tuple | |
13 | 14 | from unittest.mock import Mock |
14 | 15 | |
15 | 16 | from twisted.test.proto_helpers import MemoryReactor |
349 | 350 | |
350 | 351 | self.assertTrue(notice_in_room, "No server notice in room") |
351 | 352 | |
352 | def _trigger_notice_and_join(self): | |
353 | def _trigger_notice_and_join(self) -> Tuple[str, str, str]: | |
353 | 354 | """Creates enough active users to hit the MAU limit and trigger a system notice |
354 | 355 | about it, then joins the system notices room with one of the users created. |
355 | 356 | |
356 | 357 | Returns: |
357 | user_id (str): The ID of the user that joined the room. | |
358 | tok (str): The access token of the user that joined the room. | |
359 | room_id (str): The ID of the room that's been joined. | |
358 | A tuple of: | |
359 | user_id: The ID of the user that joined the room. | |
360 | tok: The access token of the user that joined the room. | |
361 | room_id: The ID of the room that's been joined. | |
360 | 362 | """ |
361 | 363 | user_id = None |
362 | 364 | tok = None |
27 | 27 | """ |
28 | 28 | |
29 | 29 | for device_id in device_ids: |
30 | stream_id = self.get_success( | |
30 | self.get_success( | |
31 | 31 | self.store.add_device_change_to_streams( |
32 | 32 | user_id, [device_id], ["!some:room"] |
33 | 33 | ) |
38 | 38 | user_id=user_id, |
39 | 39 | device_id=device_id, |
40 | 40 | room_id="!some:room", |
41 | stream_id=stream_id, | |
42 | 41 | hosts=[host], |
43 | 42 | context={}, |
44 | 43 | ) |
10 | 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
11 | 11 | # See the License for the specific language governing permissions and |
12 | 12 | # limitations under the License. |
13 | from prometheus_client import generate_latest | |
13 | 14 | |
14 | from synapse.metrics import REGISTRY, generate_latest | |
15 | from synapse.metrics import REGISTRY | |
15 | 16 | from synapse.types import UserID, create_requester |
16 | 17 | |
17 | 18 | from tests.unittest import HomeserverTestCase |
52 | 53 | |
53 | 54 | items = list( |
54 | 55 | filter( |
55 | lambda x: b"synapse_forward_extremities_" in x, | |
56 | generate_latest(REGISTRY, emit_help=False).split(b"\n"), | |
56 | lambda x: b"synapse_forward_extremities_" in x and b"# HELP" not in x, | |
57 | generate_latest(REGISTRY).split(b"\n"), | |
57 | 58 | ) |
58 | 59 | ) |
59 | 60 |
15 | 15 | from twisted.test.proto_helpers import MemoryReactor |
16 | 16 | |
17 | 17 | from synapse.server import HomeServer |
18 | from synapse.storage.database import DatabasePool, LoggingTransaction | |
18 | from synapse.storage.database import ( | |
19 | DatabasePool, | |
20 | LoggingDatabaseConnection, | |
21 | LoggingTransaction, | |
22 | ) | |
19 | 23 | from synapse.storage.engines import IncorrectDatabaseSetup |
20 | from synapse.storage.util.id_generators import MultiWriterIdGenerator | |
24 | from synapse.storage.types import Cursor | |
25 | from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator | |
21 | 26 | from synapse.util import Clock |
22 | 27 | |
23 | 28 | from tests.unittest import HomeserverTestCase |
24 | 29 | from tests.utils import USE_POSTGRES_FOR_TESTS |
30 | ||
31 | ||
32 | class StreamIdGeneratorTestCase(HomeserverTestCase): | |
33 | def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: | |
34 | self.store = hs.get_datastores().main | |
35 | self.db_pool: DatabasePool = self.store.db_pool | |
36 | ||
37 | self.get_success(self.db_pool.runInteraction("_setup_db", self._setup_db)) | |
38 | ||
39 | def _setup_db(self, txn: LoggingTransaction) -> None: | |
40 | txn.execute( | |
41 | """ | |
42 | CREATE TABLE foobar ( | |
43 | stream_id BIGINT NOT NULL, | |
44 | data TEXT | |
45 | ); | |
46 | """ | |
47 | ) | |
48 | txn.execute("INSERT INTO foobar VALUES (123, 'hello world');") | |
49 | ||
50 | def _create_id_generator(self) -> StreamIdGenerator: | |
51 | def _create(conn: LoggingDatabaseConnection) -> StreamIdGenerator: | |
52 | return StreamIdGenerator( | |
53 | db_conn=conn, | |
54 | table="foobar", | |
55 | column="stream_id", | |
56 | ) | |
57 | ||
58 | return self.get_success_or_raise(self.db_pool.runWithConnection(_create)) | |
59 | ||
60 | def test_initial_value(self) -> None: | |
61 | """Check that we read the current token from the DB.""" | |
62 | id_gen = self._create_id_generator() | |
63 | self.assertEqual(id_gen.get_current_token(), 123) | |
64 | ||
65 | def test_single_gen_next(self) -> None: | |
66 | """Check that we correctly increment the current token from the DB.""" | |
67 | id_gen = self._create_id_generator() | |
68 | ||
69 | async def test_gen_next() -> None: | |
70 | async with id_gen.get_next() as next_id: | |
71 | # We haven't persisted `next_id` yet; current token is still 123 | |
72 | self.assertEqual(id_gen.get_current_token(), 123) | |
73 | # But we did learn what the next value is | |
74 | self.assertEqual(next_id, 124) | |
75 | ||
76 | # Once the context manager closes we assume that the `next_id` has been | |
77 | # written to the DB. | |
78 | self.assertEqual(id_gen.get_current_token(), 124) | |
79 | ||
80 | self.get_success(test_gen_next()) | |
81 | ||
82 | def test_multiple_gen_nexts(self) -> None: | |
83 | """Check that we handle overlapping calls to gen_next sensibly.""" | |
84 | id_gen = self._create_id_generator() | |
85 | ||
86 | async def test_gen_next() -> None: | |
87 | ctx1 = id_gen.get_next() | |
88 | ctx2 = id_gen.get_next() | |
89 | ctx3 = id_gen.get_next() | |
90 | ||
91 | # Request three new stream IDs. | |
92 | self.assertEqual(await ctx1.__aenter__(), 124) | |
93 | self.assertEqual(await ctx2.__aenter__(), 125) | |
94 | self.assertEqual(await ctx3.__aenter__(), 126) | |
95 | ||
96 | # None are persisted: current token unchanged. | |
97 | self.assertEqual(id_gen.get_current_token(), 123) | |
98 | ||
99 | # Persist each in turn. | |
100 | await ctx1.__aexit__(None, None, None) | |
101 | self.assertEqual(id_gen.get_current_token(), 124) | |
102 | await ctx2.__aexit__(None, None, None) | |
103 | self.assertEqual(id_gen.get_current_token(), 125) | |
104 | await ctx3.__aexit__(None, None, None) | |
105 | self.assertEqual(id_gen.get_current_token(), 126) | |
106 | ||
107 | self.get_success(test_gen_next()) | |
108 | ||
109 | def test_multiple_gen_nexts_closed_in_different_order(self) -> None: | |
110 | """Check that we handle overlapping calls to gen_next, even when their IDs | |
111 | created and persisted in different orders.""" | |
112 | id_gen = self._create_id_generator() | |
113 | ||
114 | async def test_gen_next() -> None: | |
115 | ctx1 = id_gen.get_next() | |
116 | ctx2 = id_gen.get_next() | |
117 | ctx3 = id_gen.get_next() | |
118 | ||
119 | # Request three new stream IDs. | |
120 | self.assertEqual(await ctx1.__aenter__(), 124) | |
121 | self.assertEqual(await ctx2.__aenter__(), 125) | |
122 | self.assertEqual(await ctx3.__aenter__(), 126) | |
123 | ||
124 | # None are persisted: current token unchanged. | |
125 | self.assertEqual(id_gen.get_current_token(), 123) | |
126 | ||
127 | # Persist them in a different order, starting with 126 from ctx3. | |
128 | await ctx3.__aexit__(None, None, None) | |
129 | # We haven't persisted 124 from ctx1 yet---current token is still 123. | |
130 | self.assertEqual(id_gen.get_current_token(), 123) | |
131 | ||
132 | # Now persist 124 from ctx1. | |
133 | await ctx1.__aexit__(None, None, None) | |
134 | # Current token is then 124, waiting for 125 to be persisted. | |
135 | self.assertEqual(id_gen.get_current_token(), 124) | |
136 | ||
137 | # Finally persist 125 from ctx2. | |
138 | await ctx2.__aexit__(None, None, None) | |
139 | # Current token is then 126 (skipping over 125). | |
140 | self.assertEqual(id_gen.get_current_token(), 126) | |
141 | ||
142 | self.get_success(test_gen_next()) | |
143 | ||
144 | def test_gen_next_while_still_waiting_for_persistence(self) -> None: | |
145 | """Check that we handle overlapping calls to gen_next.""" | |
146 | id_gen = self._create_id_generator() | |
147 | ||
148 | async def test_gen_next() -> None: | |
149 | ctx1 = id_gen.get_next() | |
150 | ctx2 = id_gen.get_next() | |
151 | ctx3 = id_gen.get_next() | |
152 | ||
153 | # Request two new stream IDs. | |
154 | self.assertEqual(await ctx1.__aenter__(), 124) | |
155 | self.assertEqual(await ctx2.__aenter__(), 125) | |
156 | ||
157 | # Persist ctx2 first. | |
158 | await ctx2.__aexit__(None, None, None) | |
159 | # Still waiting on ctx1's ID to be persisted. | |
160 | self.assertEqual(id_gen.get_current_token(), 123) | |
161 | ||
162 | # Now request a third stream ID. It should be 126 (the smallest ID that | |
163 | # we've not yet handed out.) | |
164 | self.assertEqual(await ctx3.__aenter__(), 126) | |
165 | ||
166 | self.get_success(test_gen_next()) | |
25 | 167 | |
26 | 168 | |
27 | 169 | class MultiWriterIdGeneratorTestCase(HomeserverTestCase): |
47 | 189 | ) |
48 | 190 | |
49 | 191 | def _create_id_generator( |
50 | self, instance_name="master", writers: Optional[List[str]] = None | |
192 | self, instance_name: str = "master", writers: Optional[List[str]] = None | |
51 | 193 | ) -> MultiWriterIdGenerator: |
52 | def _create(conn): | |
194 | def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: | |
53 | 195 | return MultiWriterIdGenerator( |
54 | 196 | conn, |
55 | 197 | self.db_pool, |
445 | 587 | self._insert_row_with_id("master", 3) |
446 | 588 | |
447 | 589 | # Now we add a row *without* updating the stream ID |
448 | def _insert(txn): | |
590 | def _insert(txn: Cursor) -> None: | |
449 | 591 | txn.execute("INSERT INTO foobar VALUES (26, 'master')") |
450 | 592 | |
451 | 593 | self.get_success(self.db_pool.runInteraction("_insert", _insert)) |
480 | 622 | ) |
481 | 623 | |
482 | 624 | def _create_id_generator( |
483 | self, instance_name="master", writers: Optional[List[str]] = None | |
625 | self, instance_name: str = "master", writers: Optional[List[str]] = None | |
484 | 626 | ) -> MultiWriterIdGenerator: |
485 | def _create(conn): | |
627 | def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: | |
486 | 628 | return MultiWriterIdGenerator( |
487 | 629 | conn, |
488 | 630 | self.db_pool, |
616 | 758 | ) |
617 | 759 | |
618 | 760 | def _create_id_generator( |
619 | self, instance_name="master", writers: Optional[List[str]] = None | |
761 | self, instance_name: str = "master", writers: Optional[List[str]] = None | |
620 | 762 | ) -> MultiWriterIdGenerator: |
621 | def _create(conn): | |
763 | def _create(conn: LoggingDatabaseConnection) -> MultiWriterIdGenerator: | |
622 | 764 | return MultiWriterIdGenerator( |
623 | 765 | conn, |
624 | 766 | self.db_pool, |
640 | 782 | instance_name: str, |
641 | 783 | number: int, |
642 | 784 | update_stream_table: bool = True, |
643 | ): | |
785 | ) -> None: | |
644 | 786 | """Insert N rows as the given instance, inserting with stream IDs pulled |
645 | 787 | from the postgres sequence. |
646 | 788 | """ |
60 | 60 | |
61 | 61 | filtered = self.get_success( |
62 | 62 | filter_events_for_server( |
63 | self._storage_controllers, "test_server", events_to_filter | |
63 | self._storage_controllers, "test_server", "hs", events_to_filter | |
64 | 64 | ) |
65 | 65 | ) |
66 | 66 | |
82 | 82 | self.assertEqual( |
83 | 83 | self.get_success( |
84 | 84 | filter_events_for_server( |
85 | self._storage_controllers, "remote_hs", [outlier] | |
85 | self._storage_controllers, "remote_hs", "hs", [outlier] | |
86 | 86 | ) |
87 | 87 | ), |
88 | 88 | [outlier], |
93 | 93 | |
94 | 94 | filtered = self.get_success( |
95 | 95 | filter_events_for_server( |
96 | self._storage_controllers, "remote_hs", [outlier, evt] | |
96 | self._storage_controllers, "remote_hs", "local_hs", [outlier, evt] | |
97 | 97 | ) |
98 | 98 | ) |
99 | 99 | self.assertEqual(len(filtered), 2, f"expected 2 results, got: {filtered}") |
105 | 105 | # be redacted) |
106 | 106 | filtered = self.get_success( |
107 | 107 | filter_events_for_server( |
108 | self._storage_controllers, "other_server", [outlier, evt] | |
108 | self._storage_controllers, "other_server", "local_hs", [outlier, evt] | |
109 | 109 | ) |
110 | 110 | ) |
111 | 111 | self.assertEqual(filtered[0], outlier) |
140 | 140 | # ... and the filtering happens. |
141 | 141 | filtered = self.get_success( |
142 | 142 | filter_events_for_server( |
143 | self._storage_controllers, "test_server", events_to_filter | |
143 | self._storage_controllers, "test_server", "local_hs", events_to_filter | |
144 | 144 | ) |
145 | 145 | ) |
146 | 146 |
359 | 359 | store.db_pool.updates.do_next_background_update(False), by=0.1 |
360 | 360 | ) |
361 | 361 | |
362 | def make_homeserver(self, reactor, clock): | |
362 | def make_homeserver(self, reactor: MemoryReactor, clock: Clock): | |
363 | 363 | """ |
364 | 364 | Make and return a homeserver. |
365 | 365 | |
366 | 366 | Args: |
367 | 367 | reactor: A Twisted Reactor, or something that pretends to be one. |
368 | clock (synapse.util.Clock): The Clock, associated with the reactor. | |
368 | clock: The Clock, associated with the reactor. | |
369 | 369 | |
370 | 370 | Returns: |
371 | 371 | A homeserver suitable for testing. |
425 | 425 | |
426 | 426 | Args: |
427 | 427 | reactor: A Twisted Reactor, or something that pretends to be one. |
428 | clock (synapse.util.Clock): The Clock, associated with the reactor. | |
429 | homeserver (synapse.server.HomeServer): The HomeServer to test | |
430 | against. | |
428 | clock: The Clock, associated with the reactor. | |
429 | homeserver: The HomeServer to test against. | |
431 | 430 | |
432 | 431 | Function to optionally be overridden in subclasses. |
433 | 432 | """ |
451 | 450 | given content. |
452 | 451 | |
453 | 452 | Args: |
454 | method (bytes/unicode): The HTTP request method ("verb"). | |
455 | path (bytes/unicode): The HTTP path, suitably URL encoded (e.g. | |
456 | escaped UTF-8 & spaces and such). | |
457 | content (bytes or dict): The body of the request. JSON-encoded, if | |
458 | a dict. | |
453 | method: The HTTP request method ("verb"). | |
454 | path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 & spaces | |
455 | and such). content (bytes or dict): The body of the request. | |
456 | JSON-encoded, if a dict. | |
459 | 457 | shorthand: Whether to try and be helpful and prefix the given URL |
460 | 458 | with the usual REST API path, if it doesn't contain it. |
461 | 459 | federation_auth_origin: if set to not-None, we will add a fake |
10 | 10 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
11 | 11 | # See the License for the specific language governing permissions and |
12 | 12 | # limitations under the License. |
13 | from typing import NoReturn | |
13 | 14 | from unittest.mock import Mock |
14 | 15 | |
15 | 16 | from twisted.internet import defer |
22 | 23 | |
23 | 24 | |
24 | 25 | class CachedCallTestCase(TestCase): |
25 | def test_get(self): | |
26 | def test_get(self) -> None: | |
26 | 27 | """ |
27 | 28 | Happy-path test case: makes a couple of calls and makes sure they behave |
28 | 29 | correctly |
29 | 30 | """ |
30 | d = Deferred() | |
31 | d: "Deferred[int]" = Deferred() | |
31 | 32 | |
32 | async def f(): | |
33 | async def f() -> int: | |
33 | 34 | return await d |
34 | 35 | |
35 | 36 | slow_call = Mock(side_effect=f) |
42 | 43 | # now fire off a couple of calls |
43 | 44 | completed_results = [] |
44 | 45 | |
45 | async def r(): | |
46 | async def r() -> None: | |
46 | 47 | res = await cached_call.get() |
47 | 48 | completed_results.append(res) |
48 | 49 | |
68 | 69 | self.assertEqual(r3, 123) |
69 | 70 | slow_call.assert_not_called() |
70 | 71 | |
71 | def test_fast_call(self): | |
72 | def test_fast_call(self) -> None: | |
72 | 73 | """ |
73 | 74 | Test the behaviour when the underlying function completes immediately |
74 | 75 | """ |
75 | 76 | |
76 | async def f(): | |
77 | async def f() -> int: | |
77 | 78 | return 12 |
78 | 79 | |
79 | 80 | fast_call = Mock(side_effect=f) |
91 | 92 | |
92 | 93 | |
93 | 94 | class RetryOnExceptionCachedCallTestCase(TestCase): |
94 | def test_get(self): | |
95 | def test_get(self) -> None: | |
95 | 96 | # set up the RetryOnExceptionCachedCall around a function which will fail |
96 | 97 | # (after a while) |
97 | d = Deferred() | |
98 | d: "Deferred[int]" = Deferred() | |
98 | 99 | |
99 | async def f1(): | |
100 | async def f1() -> NoReturn: | |
100 | 101 | await d |
101 | 102 | raise ValueError("moo") |
102 | 103 | |
109 | 110 | # now fire off a couple of calls |
110 | 111 | completed_results = [] |
111 | 112 | |
112 | async def r(): | |
113 | async def r() -> None: | |
113 | 114 | try: |
114 | 115 | await cached_call.get() |
115 | 116 | except Exception as e1: |
136 | 137 | # to the getter |
137 | 138 | d = Deferred() |
138 | 139 | |
139 | async def f2(): | |
140 | async def f2() -> int: | |
140 | 141 | return await d |
141 | 142 | |
142 | 143 | slow_call.reset_mock() |
12 | 12 | # limitations under the License. |
13 | 13 | |
14 | 14 | from functools import partial |
15 | from typing import List, Tuple | |
15 | 16 | |
16 | 17 | from twisted.internet import defer |
17 | 18 | |
21 | 22 | |
22 | 23 | |
23 | 24 | class DeferredCacheTestCase(TestCase): |
24 | def test_empty(self): | |
25 | cache = DeferredCache("test") | |
25 | def test_empty(self) -> None: | |
26 | cache: DeferredCache[str, int] = DeferredCache("test") | |
26 | 27 | with self.assertRaises(KeyError): |
27 | 28 | cache.get("foo") |
28 | 29 | |
29 | def test_hit(self): | |
30 | cache = DeferredCache("test") | |
30 | def test_hit(self) -> None: | |
31 | cache: DeferredCache[str, int] = DeferredCache("test") | |
31 | 32 | cache.prefill("foo", 123) |
32 | 33 | |
33 | 34 | self.assertEqual(self.successResultOf(cache.get("foo")), 123) |
34 | 35 | |
35 | def test_hit_deferred(self): | |
36 | cache = DeferredCache("test") | |
37 | origin_d = defer.Deferred() | |
36 | def test_hit_deferred(self) -> None: | |
37 | cache: DeferredCache[str, int] = DeferredCache("test") | |
38 | origin_d: "defer.Deferred[int]" = defer.Deferred() | |
38 | 39 | set_d = cache.set("k1", origin_d) |
39 | 40 | |
40 | 41 | # get should return an incomplete deferred |
42 | 43 | self.assertFalse(get_d.called) |
43 | 44 | |
44 | 45 | # add a callback that will make sure that the set_d gets called before the get_d |
45 | def check1(r): | |
46 | def check1(r: str) -> str: | |
46 | 47 | self.assertTrue(set_d.called) |
47 | 48 | return r |
48 | 49 | |
54 | 55 | self.assertEqual(self.successResultOf(set_d), 99) |
55 | 56 | self.assertEqual(self.successResultOf(get_d), 99) |
56 | 57 | |
57 | def test_callbacks(self): | |
58 | def test_callbacks(self) -> None: | |
58 | 59 | """Invalidation callbacks are called at the right time""" |
59 | cache = DeferredCache("test") | |
60 | cache: DeferredCache[str, int] = DeferredCache("test") | |
60 | 61 | callbacks = set() |
61 | 62 | |
62 | 63 | # start with an entry, with a callback |
63 | 64 | cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) |
64 | 65 | |
65 | 66 | # now replace that entry with a pending result |
66 | origin_d = defer.Deferred() | |
67 | origin_d: "defer.Deferred[int]" = defer.Deferred() | |
67 | 68 | set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) |
68 | 69 | |
69 | 70 | # ... and also make a get request |
88 | 89 | cache.prefill("k1", 30) |
89 | 90 | self.assertEqual(callbacks, {"set", "get"}) |
90 | 91 | |
91 | def test_set_fail(self): | |
92 | cache = DeferredCache("test") | |
92 | def test_set_fail(self) -> None: | |
93 | cache: DeferredCache[str, int] = DeferredCache("test") | |
93 | 94 | callbacks = set() |
94 | 95 | |
95 | 96 | # start with an entry, with a callback |
96 | 97 | cache.prefill("k1", 10, callback=lambda: callbacks.add("prefill")) |
97 | 98 | |
98 | 99 | # now replace that entry with a pending result |
99 | origin_d = defer.Deferred() | |
100 | origin_d: defer.Deferred = defer.Deferred() | |
100 | 101 | set_d = cache.set("k1", origin_d, callback=lambda: callbacks.add("set")) |
101 | 102 | |
102 | 103 | # ... and also make a get request |
125 | 126 | cache.prefill("k1", 30) |
126 | 127 | self.assertEqual(callbacks, {"prefill", "get2"}) |
127 | 128 | |
128 | def test_get_immediate(self): | |
129 | cache = DeferredCache("test") | |
130 | d1 = defer.Deferred() | |
129 | def test_get_immediate(self) -> None: | |
130 | cache: DeferredCache[str, int] = DeferredCache("test") | |
131 | d1: "defer.Deferred[int]" = defer.Deferred() | |
131 | 132 | cache.set("key1", d1) |
132 | 133 | |
133 | 134 | # get_immediate should return default |
141 | 142 | v = cache.get_immediate("key1", 1) |
142 | 143 | self.assertEqual(v, 2) |
143 | 144 | |
144 | def test_invalidate(self): | |
145 | cache = DeferredCache("test") | |
145 | def test_invalidate(self) -> None: | |
146 | cache: DeferredCache[Tuple[str], int] = DeferredCache("test") | |
146 | 147 | cache.prefill(("foo",), 123) |
147 | 148 | cache.invalidate(("foo",)) |
148 | 149 | |
149 | 150 | with self.assertRaises(KeyError): |
150 | 151 | cache.get(("foo",)) |
151 | 152 | |
152 | def test_invalidate_all(self): | |
153 | cache = DeferredCache("testcache") | |
153 | def test_invalidate_all(self) -> None: | |
154 | cache: DeferredCache[str, str] = DeferredCache("testcache") | |
154 | 155 | |
155 | 156 | callback_record = [False, False] |
156 | 157 | |
157 | def record_callback(idx): | |
158 | def record_callback(idx: int) -> None: | |
158 | 159 | callback_record[idx] = True |
159 | 160 | |
160 | 161 | # add a couple of pending entries |
161 | d1 = defer.Deferred() | |
162 | d1: "defer.Deferred[str]" = defer.Deferred() | |
162 | 163 | cache.set("key1", d1, partial(record_callback, 0)) |
163 | 164 | |
164 | d2 = defer.Deferred() | |
165 | d2: "defer.Deferred[str]" = defer.Deferred() | |
165 | 166 | cache.set("key2", d2, partial(record_callback, 1)) |
166 | 167 | |
167 | 168 | # lookup should return pending deferreds |
192 | 193 | with self.assertRaises(KeyError): |
193 | 194 | cache.get("key1", None) |
194 | 195 | |
195 | def test_eviction(self): | |
196 | cache = DeferredCache( | |
196 | def test_eviction(self) -> None: | |
197 | cache: DeferredCache[int, str] = DeferredCache( | |
197 | 198 | "test", max_entries=2, apply_cache_factor_from_config=False |
198 | 199 | ) |
199 | 200 | |
207 | 208 | cache.get(2) |
208 | 209 | cache.get(3) |
209 | 210 | |
210 | def test_eviction_lru(self): | |
211 | cache = DeferredCache( | |
211 | def test_eviction_lru(self) -> None: | |
212 | cache: DeferredCache[int, str] = DeferredCache( | |
212 | 213 | "test", max_entries=2, apply_cache_factor_from_config=False |
213 | 214 | ) |
214 | 215 | |
226 | 227 | cache.get(1) |
227 | 228 | cache.get(3) |
228 | 229 | |
229 | def test_eviction_iterable(self): | |
230 | cache = DeferredCache( | |
230 | def test_eviction_iterable(self) -> None: | |
231 | cache: DeferredCache[int, List[str]] = DeferredCache( | |
231 | 232 | "test", |
232 | 233 | max_entries=3, |
233 | 234 | apply_cache_factor_from_config=False, |
12 | 12 | # See the License for the specific language governing permissions and |
13 | 13 | # limitations under the License. |
14 | 14 | import logging |
15 | from typing import Iterable, Set, Tuple | |
15 | from typing import Iterable, Set, Tuple, cast | |
16 | 16 | from unittest import mock |
17 | 17 | |
18 | 18 | from twisted.internet import defer, reactor |
19 | 19 | from twisted.internet.defer import CancelledError, Deferred |
20 | from twisted.internet.interfaces import IReactorTime | |
20 | 21 | |
21 | 22 | from synapse.api.errors import SynapseError |
22 | 23 | from synapse.logging.context import ( |
36 | 37 | |
37 | 38 | |
38 | 39 | def run_on_reactor(): |
39 | d = defer.Deferred() | |
40 | reactor.callLater(0, d.callback, 0) | |
40 | d: "Deferred[int]" = defer.Deferred() | |
41 | cast(IReactorTime, reactor).callLater(0, d.callback, 0) | |
41 | 42 | return make_deferred_yieldable(d) |
42 | 43 | |
43 | 44 | |
223 | 224 | callbacks: Set[str] = set() |
224 | 225 | |
225 | 226 | # set off an asynchronous request |
226 | obj.result = origin_d = defer.Deferred() | |
227 | origin_d: Deferred = defer.Deferred() | |
228 | obj.result = origin_d | |
227 | 229 | |
228 | 230 | d1 = obj.fn(1, on_invalidate=lambda: callbacks.add("d1")) |
229 | 231 | self.assertFalse(d1.called) |
261 | 263 | """Check that logcontexts are set and restored correctly when |
262 | 264 | using the cache.""" |
263 | 265 | |
264 | complete_lookup = defer.Deferred() | |
266 | complete_lookup: Deferred = defer.Deferred() | |
265 | 267 | |
266 | 268 | class Cls: |
267 | 269 | @descriptors.cached() |
771 | 773 | |
772 | 774 | @descriptors.cachedList(cached_method_name="fn", list_name="args1") |
773 | 775 | async def list_fn(self, args1, arg2): |
774 | assert current_context().name == "c1" | |
776 | context = current_context() | |
777 | assert isinstance(context, LoggingContext) | |
778 | assert context.name == "c1" | |
775 | 779 | # we want this to behave like an asynchronous function |
776 | 780 | await run_on_reactor() |
777 | assert current_context().name == "c1" | |
781 | context = current_context() | |
782 | assert isinstance(context, LoggingContext) | |
783 | assert context.name == "c1" | |
778 | 784 | return self.mock(args1, arg2) |
779 | 785 | |
780 | 786 | with LoggingContext("c1") as c1: |
833 | 839 | return self.mock(args1) |
834 | 840 | |
835 | 841 | obj = Cls() |
836 | deferred_result = Deferred() | |
842 | deferred_result: "Deferred[dict]" = Deferred() | |
837 | 843 | obj.mock.return_value = deferred_result |
838 | 844 | |
839 | 845 | # start off several concurrent lookups of the same key |
34 | 34 | (These have cache with a short timeout_ms=, shorter than will be tested through advancing the clock) |
35 | 35 | """ |
36 | 36 | |
37 | def setUp(self): | |
37 | def setUp(self) -> None: | |
38 | 38 | self.reactor, self.clock = get_clock() |
39 | 39 | |
40 | 40 | def with_cache(self, name: str, ms: int = 0) -> ResponseCache: |
48 | 48 | await self.clock.sleep(1) |
49 | 49 | return o |
50 | 50 | |
51 | def test_cache_hit(self): | |
51 | def test_cache_hit(self) -> None: | |
52 | 52 | cache = self.with_cache("keeping_cache", ms=9001) |
53 | 53 | |
54 | 54 | expected_result = "howdy" |
73 | 73 | "cache should still have the result", |
74 | 74 | ) |
75 | 75 | |
76 | def test_cache_miss(self): | |
76 | def test_cache_miss(self) -> None: | |
77 | 77 | cache = self.with_cache("trashing_cache", ms=0) |
78 | 78 | |
79 | 79 | expected_result = "howdy" |
89 | 89 | ) |
90 | 90 | self.assertCountEqual([], cache.keys(), "cache should not have the result now") |
91 | 91 | |
92 | def test_cache_expire(self): | |
92 | def test_cache_expire(self) -> None: | |
93 | 93 | cache = self.with_cache("short_cache", ms=1000) |
94 | 94 | |
95 | 95 | expected_result = "howdy" |
114 | 114 | self.reactor.pump((2,)) |
115 | 115 | self.assertCountEqual([], cache.keys(), "cache should not have the result now") |
116 | 116 | |
117 | def test_cache_wait_hit(self): | |
117 | def test_cache_wait_hit(self) -> None: | |
118 | 118 | cache = self.with_cache("neutral_cache") |
119 | 119 | |
120 | 120 | expected_result = "howdy" |
130 | 130 | |
131 | 131 | self.assertEqual(expected_result, self.successResultOf(wrap_d)) |
132 | 132 | |
133 | def test_cache_wait_expire(self): | |
133 | def test_cache_wait_expire(self) -> None: | |
134 | 134 | cache = self.with_cache("medium_cache", ms=3000) |
135 | 135 | |
136 | 136 | expected_result = "howdy" |
161 | 161 | self.assertCountEqual([], cache.keys(), "cache should not have the result now") |
162 | 162 | |
163 | 163 | @parameterized.expand([(True,), (False,)]) |
164 | def test_cache_context_nocache(self, should_cache: bool): | |
164 | def test_cache_context_nocache(self, should_cache: bool) -> None: | |
165 | 165 | """If the callback clears the should_cache bit, the result should not be cached""" |
166 | 166 | cache = self.with_cache("medium_cache", ms=3000) |
167 | 167 | |
169 | 169 | |
170 | 170 | call_count = 0 |
171 | 171 | |
172 | async def non_caching(o: str, cache_context: ResponseCacheContext[int]): | |
172 | async def non_caching(o: str, cache_context: ResponseCacheContext[int]) -> str: | |
173 | 173 | nonlocal call_count |
174 | 174 | call_count += 1 |
175 | 175 | await self.clock.sleep(1) |
19 | 19 | |
20 | 20 | |
21 | 21 | class CacheTestCase(unittest.TestCase): |
22 | def setUp(self): | |
22 | def setUp(self) -> None: | |
23 | 23 | self.mock_timer = Mock(side_effect=lambda: 100.0) |
24 | self.cache = TTLCache("test_cache", self.mock_timer) | |
24 | self.cache: TTLCache[str, str] = TTLCache("test_cache", self.mock_timer) | |
25 | 25 | |
26 | def test_get(self): | |
26 | def test_get(self) -> None: | |
27 | 27 | """simple set/get tests""" |
28 | 28 | self.cache.set("one", "1", 10) |
29 | 29 | self.cache.set("two", "2", 20) |
58 | 58 | self.assertEqual(self.cache._metrics.hits, 4) |
59 | 59 | self.assertEqual(self.cache._metrics.misses, 5) |
60 | 60 | |
61 | def test_expiry(self): | |
61 | def test_expiry(self) -> None: | |
62 | 62 | self.cache.set("one", "1", 10) |
63 | 63 | self.cache.set("two", "2", 20) |
64 | 64 | self.cache.set("three", "3", 30) |