matrix-synapse / 45de6f9
New upstream version 1.6.0 · Andrej Shadura · 4 years ago
212 changed files with 6378 additions and 2902 deletions.
0 # Configuration file used for testing the 'synapse_port_db' script.
1 # Tells the script to connect to the postgresql database that will be available in the
2 # CI's Docker setup at the point where this file is considered.
3 server_name: "test"
4
5 signing_key_path: "/src/.buildkite/test.signing.key"
6
7 report_stats: false
8
9 database:
10 name: "psycopg2"
11 args:
12 user: postgres
13 host: postgres
14 password: postgres
15 database: synapse
16
17 # Suppress the key server warning.
18 trusted_key_servers:
19 - server_name: "matrix.org"
20 suppress_key_server_warning: true
0 #!/usr/bin/env python
1 # -*- coding: utf-8 -*-
2 # Copyright 2019 The Matrix.org Foundation C.I.C.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import logging
17 from synapse.storage.engines import create_engine
18
19 logger = logging.getLogger("create_postgres_db")
20
21 if __name__ == "__main__":
22 # Create a PostgresEngine.
23 db_engine = create_engine({"name": "psycopg2", "args": {}})
24
25 # Connect to postgres to create the base database.
26 # We use "postgres" as a database because it's bound to exist and the "synapse" one
27 # doesn't exist yet.
28 db_conn = db_engine.module.connect(
29 user="postgres", host="postgres", password="postgres", dbname="postgres"
30 )
31 db_conn.autocommit = True
32 cur = db_conn.cursor()
33 cur.execute("CREATE DATABASE synapse;")
34 cur.close()
35 db_conn.close()
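A quick sanity check (a hypothetical snippet, not part of the CI setup) is to connect directly to the database the script just created:

```
import psycopg2

# Connecting to the new "synapse" database raises OperationalError if the
# CREATE DATABASE above did not take effect.
conn = psycopg2.connect(
    user="postgres", host="postgres", password="postgres", dbname="synapse"
)
conn.close()
```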
0 #!/bin/bash
1 #
2 # Test script for 'synapse_port_db', which creates a virtualenv, installs Synapse along
3 # with additional dependencies needed for the test (such as coverage or the PostgreSQL
4 # driver), updates the schema of the test SQLite database and runs background updates on it,
5 # creates an empty test database in PostgreSQL, then runs the 'synapse_port_db' script to
6 # test porting the SQLite database to the PostgreSQL database (with coverage).
7
8 set -xe
9 cd `dirname $0`/../..
10
11 echo "--- Install dependencies"
12
13 # Install dependencies for this test.
14 pip install psycopg2 coverage coverage-enable-subprocess
15
16 # Install Synapse itself. This won't update any libraries.
17 pip install -e .
18
19 echo "--- Generate the signing key"
20
21 # Generate the server's signing key.
22 python -m synapse.app.homeserver --generate-keys -c .buildkite/sqlite-config.yaml
23
24 echo "--- Prepare the databases"
25
26 # Make sure the SQLite3 database is using the latest schema and has no pending background updates.
27 scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
28
29 # Create the PostgreSQL database.
30 ./.buildkite/scripts/create_postgres_db.py
31
32 echo "+++ Run synapse_port_db"
33
34 # Run the script
35 coverage run scripts/synapse_port_db --sqlite-database .buildkite/test_db.db --postgres-config .buildkite/postgres-config.yaml
0 # Configuration file used for testing the 'synapse_port_db' script.
1 # Tells the 'update_database' script to connect to the test SQLite database to upgrade its
2 # schema and run background updates on it.
3 server_name: "test"
4
5 signing_key_path: "/src/.buildkite/test.signing.key"
6
7 report_stats: false
8
9 database:
10 name: "sqlite3"
11 args:
12 database: ".buildkite/test_db.db"
13
14 # Suppress the key server warning.
15 trusted_key_servers:
16 - server_name: "matrix.org"
17 suppress_key_server_warning: true
Binary diff not shown
44 * [ ] Pull request is based on the develop branch
55 * [ ] Pull request includes a [changelog file](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#changelog)
66 * [ ] Pull request includes a [sign off](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#sign-off)
7 * [ ] Code style is correct (run the [linters](https://github.com/matrix-org/synapse/blob/master/CONTRIBUTING.rst#code-style))
0 Synapse 1.6.0 (2019-11-26)
1 ==========================
2
3 Bugfixes
4 --------
5
6 - Fix phone home stats reporting. ([\#6418](https://github.com/matrix-org/synapse/issues/6418))
7
8
9 Synapse 1.6.0rc2 (2019-11-25)
10 =============================
11
12 Bugfixes
13 --------
14
15 - Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))
16
17
18 Synapse 1.6.0rc1 (2019-11-20)
19 =============================
20
21 Features
22 --------
23
24 - Add federation support for cross-signing. ([\#5727](https://github.com/matrix-org/synapse/issues/5727))
25 - Increase default room version from 4 to 5, thereby enforcing server key validity period checks. ([\#6220](https://github.com/matrix-org/synapse/issues/6220))
26 - Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars. ([\#6238](https://github.com/matrix-org/synapse/issues/6238))
27 - Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). ([\#6301](https://github.com/matrix-org/synapse/issues/6301), [\#6310](https://github.com/matrix-org/synapse/issues/6310), [\#6340](https://github.com/matrix-org/synapse/issues/6340))
28
29
30 Bugfixes
31 --------
32
33 - Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460. ([\#6213](https://github.com/matrix-org/synapse/issues/6213))
34 - Remove a room from a server's public rooms list on room upgrade. ([\#6232](https://github.com/matrix-org/synapse/issues/6232), [\#6235](https://github.com/matrix-org/synapse/issues/6235))
35 - Delete keys from key backup when deleting backup versions. ([\#6253](https://github.com/matrix-org/synapse/issues/6253))
36 - Make notification of cross-signing signatures work with workers. ([\#6254](https://github.com/matrix-org/synapse/issues/6254))
37 - Fix exception when remote servers attempt to join a room that they're not allowed to join. ([\#6278](https://github.com/matrix-org/synapse/issues/6278))
38 - Prevent errors from appearing on Synapse startup if `git` is not installed. ([\#6284](https://github.com/matrix-org/synapse/issues/6284))
39 - Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. ([\#6306](https://github.com/matrix-org/synapse/issues/6306))
40 - Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307))
41 - Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313))
42 - Fix bug which caused rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
43 - Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335))
44 - Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338))
45 - Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. ([\#6359](https://github.com/matrix-org/synapse/issues/6359))
46 - Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause "unable to decrypt" errors. ([\#6363](https://github.com/matrix-org/synapse/issues/6363))
47 - Fix permission denied error when trying to generate a config file with the docker image. ([\#6389](https://github.com/matrix-org/synapse/issues/6389))
48
49
50 Improved Documentation
51 ----------------------
52
53 - Contributor documentation now mentions script to run linters. ([\#6164](https://github.com/matrix-org/synapse/issues/6164))
54 - Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate. ([\#6257](https://github.com/matrix-org/synapse/issues/6257))
55 - Update `INSTALL.md` Email section to talk about `account_threepid_delegates`. ([\#6272](https://github.com/matrix-org/synapse/issues/6272))
56 - Fix a small typo in `account_threepid_delegates` configuration option. ([\#6273](https://github.com/matrix-org/synapse/issues/6273))
57
58
59 Internal Changes
60 ----------------
61
62 - Add a CI job to test the `synapse_port_db` script. ([\#6140](https://github.com/matrix-org/synapse/issues/6140), [\#6276](https://github.com/matrix-org/synapse/issues/6276))
63 - Convert EventContext to an attrs class. ([\#6218](https://github.com/matrix-org/synapse/issues/6218))
64 - Move `persist_events` out from main data store. ([\#6240](https://github.com/matrix-org/synapse/issues/6240), [\#6300](https://github.com/matrix-org/synapse/issues/6300))
65 - Reduce verbosity of user/room stats. ([\#6250](https://github.com/matrix-org/synapse/issues/6250))
66 - Reduce impact of debug logging. ([\#6251](https://github.com/matrix-org/synapse/issues/6251))
67 - Expose some homeserver functionality to spam checkers. ([\#6259](https://github.com/matrix-org/synapse/issues/6259))
68 - Change cache descriptors to always return deferreds. ([\#6263](https://github.com/matrix-org/synapse/issues/6263), [\#6291](https://github.com/matrix-org/synapse/issues/6291))
69 - Fix incorrect comment regarding the functionality of an `if` statement. ([\#6269](https://github.com/matrix-org/synapse/issues/6269))
70 - Update CI to run `isort` over the `scripts` and `scripts-dev` directories. ([\#6270](https://github.com/matrix-org/synapse/issues/6270))
71 - Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. ([\#6271](https://github.com/matrix-org/synapse/issues/6271), [\#6314](https://github.com/matrix-org/synapse/issues/6314))
72 - Port replication http server endpoints to async/await. ([\#6274](https://github.com/matrix-org/synapse/issues/6274))
73 - Port room rest handlers to async/await. ([\#6275](https://github.com/matrix-org/synapse/issues/6275))
74 - Remove redundant CLI parameters on CI's `flake8` step. ([\#6277](https://github.com/matrix-org/synapse/issues/6277))
75 - Port `federation_server.py` to async/await. ([\#6279](https://github.com/matrix-org/synapse/issues/6279))
76 - Port receipt and read markers to async/await. ([\#6280](https://github.com/matrix-org/synapse/issues/6280))
77 - Split out state storage into separate data store. ([\#6294](https://github.com/matrix-org/synapse/issues/6294), [\#6295](https://github.com/matrix-org/synapse/issues/6295))
78 - Refactor EventContext for clarity. ([\#6298](https://github.com/matrix-org/synapse/issues/6298))
79 - Update the version of black used to 19.10b0. ([\#6304](https://github.com/matrix-org/synapse/issues/6304))
80 - Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305))
81 - Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308))
82 - Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312))
83 - Add optional python dependencies and dependent binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
84 - Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336))
85 - Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319))
86 - Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330))
87 - Add continuous integration for python 3.8. ([\#6341](https://github.com/matrix-org/synapse/issues/6341))
88 - Correct spacing/case of various instances of the word "homeserver". ([\#6357](https://github.com/matrix-org/synapse/issues/6357))
89 - Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room. ([\#6361](https://github.com/matrix-org/synapse/issues/6361))
90
91
092 Synapse 1.5.1 (2019-11-06)
193 ==========================
294
5757 got as far as documenting it... For instance, synapse's code style doc lives
5858 at https://github.com/matrix-org/synapse/tree/master/docs/code_style.md.
5959
60 To facilitate meeting these criteria you can run ``scripts-dev/lint.sh``
61 locally. Since this runs the tools listed in the above document, you'll need
62 Python 3.6 and each of those tools installed. **Note that the script does not just
63 test/check, but also reformats code, so you may wish to ensure any new code is
64 committed first**. By default this script checks all files and can take some
65 time; if you alter only certain files, you might wish to specify paths as
66 arguments to reduce the run-time.
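For example, to check only the files you have touched (the paths here are
purely illustrative)::

    ./scripts-dev/lint.sh synapse/handlers/sync.py tests/test_server.py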
67
6068 Please ensure your changes match the cosmetic style of the existing project,
6169 and **never** mix cosmetic and functional changes in the same commit, as it
6270 makes it horribly hard to review otherwise.
71
72 Before committing, ensure the changes you've made don't produce linting
73 errors. You can check this by running the linters as follows; be sure to
74 commit any files the linters correct.
75
76 ::
77 # Install the dependencies
78 pip install -U black flake8 isort
79
80 # Run the linter script
81 ./scripts-dev/lint.sh
6382
6483 Changelog
6584 ~~~~~~~~~
3535 System requirements:
3636
3737 - POSIX-compliant system (tested on Linux & OS X)
38 - Python 3.5, 3.6, or 3.7
38 - Python 3.5, 3.6, 3.7, or 3.8
3939 - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org
4040
4141 Synapse is written in Python but some of the libraries it uses are written in
412412
413413 ## Email
414414
415 It is desirable for Synapse to have the capability to send email. For example,
416 this is required to support the 'password reset' feature.
415 It is desirable for Synapse to have the capability to send email. This allows
416 Synapse to send password reset emails, send verifications when an email address
417 is added to a user's account, and send email notifications to users when they
418 receive new messages.
417419
418420 To configure an SMTP server for Synapse, modify the configuration section
419 headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
420 and ``notif_from`` fields filled out. You may also need to set ``smtp_user``,
421 ``smtp_pass``, and ``require_transport_security``.
422
423 If Synapse is not configured with an SMTP server, password reset via email will
424 be disabled by default.
421 headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
422 and `notif_from` fields filled out. You may also need to set `smtp_user`,
423 `smtp_pass`, and `require_transport_security`.
424
425 If email is not configured, password reset, registration and notifications via
426 email will be disabled.
425427
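For example, a minimal `email` section might look like the following sketch, which mirrors the commented-out defaults in the sample configuration; the host and credentials are placeholders for your own SMTP server:

```
email:
  smtp_host: mail.example.com
  smtp_port: 587
  smtp_user: "exampleusername"
  smtp_pass: "examplepassword"
  require_transport_security: true
  notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
```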
426428 ## Registering a user
427429
7777 m = re.match("^join (\S+)$", line)
7878 if m:
7979 # The `sender` wants to join a room.
80 room_name, = m.groups()
80 (room_name,) = m.groups()
8181 self.print_line("%s joining %s" % (self.user, room_name))
8282 self.server.join_room(room_name, self.user, self.user)
8383 # self.print_line("OK.")
104104 m = re.match("^backfill (\S+)$", line)
105105 if m:
106106 # we want to backfill a room
107 room_name, = m.groups()
107 (room_name,) = m.groups()
108108 self.print_line("backfill %s" % room_name)
109109 self.server.backfill(room_name)
110110 return
0 matrix-synapse-py3 (1.6.0) stable; urgency=medium
1
2 * New synapse release 1.6.0.
3
4 -- Synapse Packaging team <packages@matrix.org> Tue, 26 Nov 2019 12:15:40 +0000
5
06 matrix-synapse-py3 (1.5.1) stable; urgency=medium
17
28 * New synapse release 1.5.1.
100100 to use a reverse proxy, or configure Synapse to expose an HTTPS port.
101101
102102 For documentation on using a reverse proxy, see
103 https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.rst.
103 https://github.com/matrix-org/synapse/blob/master/docs/reverse_proxy.md.
104104
105105 For more information on enabling TLS support in synapse itself, see
106106 https://github.com/matrix-org/synapse/blob/master/INSTALL.md#tls-certificates. Of
168168 # log("running %s" % (args, ))
169169
170170 if ownership is not None:
171 # make sure that synapse has perms to write to the data dir.
172 subprocess.check_output(["chown", ownership, data_dir])
173
171174 args = ["su-exec", ownership] + args
172175 os.execv("/sbin/su-exec", args)
173
174 # make sure that synapse has perms to write to the data dir.
175 subprocess.check_output(["chown", ownership, data_dir])
176176 else:
177177 os.execv("/usr/local/bin/python", args)
178178
216216 # backwards-compatibility generate-a-config-on-the-fly mode
217217 if "SYNAPSE_CONFIG_PATH" in environ:
218218 error(
219 "SYNAPSE_SERVER_NAME and SYNAPSE_CONFIG_PATH are mutually exclusive "
220 "except in `generate` or `migrate_config` mode."
219 "SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
220 "in `generate` or `migrate_config` mode. To start synapse using a "
221 "config file, unset the SYNAPSE_SERVER_NAME environment variable."
221222 )
222223
223224 config_path = "/compiled/homeserver.yaml"
33
44 ## Getting keys
55
6 Requires a public/private key pair from:
6 Requires a site/secret key pair from:
77
88 <https://developers.google.com/recaptcha/>
99
1414 The keys are a config option on the home server config. If they are not
1515 visible, you can generate them via `--generate-config`. Set the following value:
1616
17 recaptcha_public_key: YOUR_PUBLIC_KEY
18 recaptcha_private_key: YOUR_PRIVATE_KEY
17 recaptcha_public_key: YOUR_SITE_KEY
18 recaptcha_private_key: YOUR_SECRET_KEY
1919
2020 In addition, you MUST enable captchas via:
2121
7171 # For example, for room version 1, default_room_version should be set
7272 # to "1".
7373 #
74 #default_room_version: "4"
74 #default_room_version: "5"
7575
7676 # The GC threshold parameters to pass to `gc.set_threshold`, if defined
7777 #
286286 # Used by phonehome stats to group together related servers.
287287 #server_context: context
288288
289 # Resource-constrained Homeserver Settings
289 # Resource-constrained homeserver settings
290290 #
291291 # If limit_remote_rooms.enabled is True, the room complexity will be
292292 # checked before a user joins a new remote room. If it is above
742742 ## Captcha ##
743743 # See docs/CAPTCHA_SETUP for full details of configuring this.
744744
745 # This Home Server's ReCAPTCHA public key.
745 # This homeserver's ReCAPTCHA public key.
746746 #
747747 #recaptcha_public_key: "YOUR_PUBLIC_KEY"
748748
749 # This Home Server's ReCAPTCHA private key.
749 # This homeserver's ReCAPTCHA private key.
750750 #
751751 #recaptcha_private_key: "YOUR_PRIVATE_KEY"
752752
954954 # If a delegate is specified, the config option public_baseurl must also be filled out.
955955 #
956956 account_threepid_delegates:
957 #email: https://example.com # Delegate email sending to example.org
957 #email: https://example.com # Delegate email sending to example.com
958958 #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
959959
960960 # Users who register on this homeserver will automatically be joined
12691269 # smtp_user: "exampleusername"
12701270 # smtp_pass: "examplepassword"
12711271 # require_transport_security: false
1272 # notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
1272 # notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
12731273 # app_name: Matrix
12741274 #
12751275 # # Enable email notifications by default
198198
199199 #### REPLICATE (C)
200200
201 Asks the server to replicate a given stream
201 Asks the server to replicate a given stream. The syntax is:
202
203 ```
204 REPLICATE <stream_name> <token>
205 ```
206
207 Where `<token>` may be either:
208 * a numeric stream_id to stream updates since (exclusive)
209 * `NOW` to stream all subsequent updates.
210
211 The `<stream_name>` is the name of a replication stream to subscribe
212 to (see [here](../synapse/replication/tcp/streams/_base.py) for a list
213 of streams). It can also be `ALL` to subscribe to all known streams,
214 in which case the `<token>` must be set to `NOW`.
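For example, a worker that wants every future update to the `events` stream
(one of the streams in the list linked above) would send:

```
REPLICATE events NOW
```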
202215
203216 #### USER_SYNC (C)
204217
00 [mypy]
1 namespace_packages=True
2 plugins=mypy_zope:plugin
3 follow_imports=skip
4 mypy_path=stubs
1 namespace_packages = True
2 plugins = mypy_zope:plugin
3 follow_imports = normal
4 check_untyped_defs = True
5 show_error_codes = True
6 show_traceback = True
7 mypy_path = stubs
58
69 [mypy-zope]
710 ignore_missing_imports = True
7171 # check that the original exists
7272 original_file = src_paths.remote_media_filepath(origin_server, file_id)
7373 if not os.path.exists(original_file):
74 logger.warn(
74 logger.warning(
7575 "Original for %s/%s (%s) does not exist",
7676 origin_server,
7777 file_id,
156156 )
157157 except self.database_engine.module.DatabaseError as e:
158158 if self.database_engine.is_deadlock(e):
159 logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
159 logger.warning("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
160160 if i < N:
161161 i += 1
162162 conn.rollback()
431431 for row in rows:
432432 d = dict(zip(headers, row))
433433 if "\0" in d['value']:
434 logger.warn('dropping search row %s', d)
434 logger.warning('dropping search row %s', d)
435435 else:
436436 rows_dict.append(d)
437437
646646 if isinstance(col, bytes):
647647 return bytearray(col)
648648 elif isinstance(col, string_types) and "\0" in col:
649 logger.warn(
649 logger.warning(
650650 "DROPPING ROW: NUL value in table %s col %s: %r",
651651 table,
652652 headers[j],
1919 DISTS = (
2020 "debian:stretch",
2121 "debian:buster",
22 "debian:bullseye",
2223 "debian:sid",
2324 "ubuntu:xenial",
2425 "ubuntu:bionic",
2526 "ubuntu:cosmic",
2627 "ubuntu:disco",
28 "ubuntu:eoan",
2729 )
2830
2931 DESC = '''\
66
77 set -e
88
9 isort -y -rc synapse tests scripts-dev scripts
10 flake8 synapse tests
11 python3 -m black synapse tests scripts-dev scripts
9 if [ $# -ge 1 ]
10 then
11 files=$*
12 else
13 files="synapse tests scripts-dev scripts"
14 fi
15
16 echo "Linting these locations: $files"
17 isort -y -rc $files
18 flake8 $files
19 python3 -m black $files
1220 ./scripts-dev/config-lint.sh
0 #!/usr/bin/env python
1 # -*- coding: utf-8 -*-
2 # Copyright 2019 The Matrix.org Foundation C.I.C.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 import argparse
17 import logging
18 import sys
19
20 import yaml
21
22 from twisted.internet import defer, reactor
23
24 from synapse.config.homeserver import HomeServerConfig
25 from synapse.metrics.background_process_metrics import run_as_background_process
26 from synapse.server import HomeServer
27 from synapse.storage import DataStore
28 from synapse.storage.engines import create_engine
29 from synapse.storage.prepare_database import prepare_database
30
31 logger = logging.getLogger("update_database")
32
33
34 class MockHomeserver(HomeServer):
35 DATASTORE_CLASS = DataStore
36
37 def __init__(self, config, database_engine, db_conn, **kwargs):
38 super(MockHomeserver, self).__init__(
39 config.server_name,
40 reactor=reactor,
41 config=config,
42 database_engine=database_engine,
43 **kwargs
44 )
45
46 self.database_engine = database_engine
47 self.db_conn = db_conn
48
49 def get_db_conn(self):
50 return self.db_conn
51
52
53 if __name__ == "__main__":
54 parser = argparse.ArgumentParser(
55 description=(
56 "Updates a synapse database to the latest schema and runs background updates"
57 " on it."
58 )
59 )
60 parser.add_argument("-v", action='store_true')
61 parser.add_argument(
62 "--database-config",
63 type=argparse.FileType('r'),
64 required=True,
65 help="A database config file for either a SQLite3 database or a PostgreSQL one.",
66 )
67
68 args = parser.parse_args()
69
70 logging_config = {
71 "level": logging.DEBUG if args.v else logging.INFO,
72 "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
73 }
74
75 logging.basicConfig(**logging_config)
76
77 # Load, process and sanity-check the config.
78 hs_config = yaml.safe_load(args.database_config)
79
80 if "database" not in hs_config:
81 sys.stderr.write("The configuration file must have a 'database' section.\n")
82 sys.exit(4)
83
84 config = HomeServerConfig()
85 config.parse_config_dict(hs_config, "", "")
86
87 # Create the database engine and a connection to it.
88 database_engine = create_engine(config.database_config)
89 db_conn = database_engine.module.connect(
90 **{
91 k: v
92 for k, v in config.database_config.get("args", {}).items()
93 if not k.startswith("cp_")
94 }
95 )
96
97 # Update the database to the latest schema.
98 prepare_database(db_conn, database_engine, config=config)
99 db_conn.commit()
100
101 # Instantiate and initialise the homeserver object.
102 hs = MockHomeserver(
103 config,
104 database_engine,
105 db_conn,
106 db_config=config.database_config,
107 )
108 # setup instantiates the store within the homeserver object.
109 hs.setup()
110 store = hs.get_datastore()
111
112 @defer.inlineCallbacks
113 def run_background_updates():
114 yield store.run_background_updates(sleep=False)
115 # Stop the reactor to exit the script once every background update is run.
116 reactor.stop()
117
118 # Apply all background updates on the database.
119 reactor.callWhenRunning(lambda: run_as_background_process(
120 "background_updates", run_background_updates
121 ))
122
123 reactor.run()
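For reference, the CI test script earlier in this changeset drives this script with a single flag, taken verbatim from `.buildkite/scripts/test_synapse_port_db.sh`:

```
scripts-dev/update_database --database-config .buildkite/sqlite-config.yaml
```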
1919 source: .
2020 plugin: python
2121 python-version: python3
22 python-packages:
23 - '.[all]'
24 build-packages:
25 - libffi-dev
26 - libturbojpeg0-dev
27 - libssl-dev
28 - libxslt1-dev
29 - libpq-dev
30 - zlib1g-dev
31 stage-packages:
32 - libasn1-8-heimdal
33 - libgssapi3-heimdal
34 - libhcrypto4-heimdal
35 - libheimbase1-heimdal
36 - libheimntlm0-heimdal
37 - libhx509-5-heimdal
38 - libkrb5-26-heimdal
39 - libldap-2.4-2
40 - libpq5
41 - libsasl2-2
1313 # See the License for the specific language governing permissions and
1414 # limitations under the License.
1515
16 """ This is a reference implementation of a Matrix home server.
16 """ This is a reference implementation of a Matrix homeserver.
1717 """
1818
1919 import os
3535 except ImportError:
3636 pass
3737
38 __version__ = "1.5.1"
38 __version__ = "1.6.0"
3939
4040 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
4141 # We import here so that we don't have to install a bunch of deps when
143143 logging.captureWarnings(True)
144144
145145 parser = argparse.ArgumentParser(
146 description="Used to register new users with a given home server when"
147 " registration has been disabled. The home server must be"
146 description="Used to register new users with a given homeserver when"
147 " registration has been disabled. The homeserver must be"
148148 " configured with the 'registration_shared_secret' option"
149149 " set."
150150 )
201201 "server_url",
202202 default="https://localhost:8448",
203203 nargs="?",
204 help="URL to use to talk to the home server. Defaults to "
204 help="URL to use to talk to the homeserver. Defaults to "
205205 " 'https://localhost:8448'.",
206206 )
207207
496496 token = self.get_access_token_from_request(request)
497497 service = self.store.get_app_service_by_token(token)
498498 if not service:
499 logger.warn("Unrecognised appservice access token.")
499 logger.warning("Unrecognised appservice access token.")
500500 raise InvalidClientTokenError()
501501 request.authenticated_entity = service.sender
502502 return defer.succeed(service)
137137
138138 MONTHLY_ACTIVE_USER = "monthly_active_user"
139139 HS_DISABLED = "hs_disabled"
140
141
142 class EventContentFields(object):
143 """Fields found in events' content, regardless of type."""
144
145 # Labels for the event, cf https://github.com/matrix-org/matrix-doc/pull/2326
146 LABELS = "org.matrix.labels"
456456
457457
458458 class FederationError(RuntimeError):
459 """ This class is used to inform remote home servers about erroneous
459 """ This class is used to inform remote homeservers about erroneous
460460 PDUs they sent us.
461461
462462 FATAL: The remote server could not interpret the source event.
1919
2020 from twisted.internet import defer
2121
22 from synapse.api.constants import EventContentFields
2223 from synapse.api.errors import SynapseError
2324 from synapse.storage.presence import UserPresenceState
2425 from synapse.types import RoomID, UserID
6566 "contains_url": {"type": "boolean"},
6667 "lazy_load_members": {"type": "boolean"},
6768 "include_redundant_members": {"type": "boolean"},
69 # Include or exclude events with the provided labels.
70 # cf https://github.com/matrix-org/matrix-doc/pull/2326
71 "org.matrix.labels": {"type": "array", "items": {"type": "string"}},
72 "org.matrix.not_labels": {"type": "array", "items": {"type": "string"}},
6873 },
6974 }
7075
258263
259264 self.contains_url = self.filter_json.get("contains_url", None)
260265
266 self.labels = self.filter_json.get("org.matrix.labels", None)
267 self.not_labels = self.filter_json.get("org.matrix.not_labels", [])
268
261269 def filters_all_types(self):
262270 return "*" in self.not_types
263271
281289 room_id = None
282290 ev_type = "m.presence"
283291 contains_url = False
292 labels = []
284293 else:
285294 sender = event.get("sender", None)
286295 if not sender:
299308 content = event.get("content", {})
300309 # check if there is a string url field in the content for filtering purposes
301310 contains_url = isinstance(content.get("url"), text_type)
302
303 return self.check_fields(room_id, sender, ev_type, contains_url)
304
305 def check_fields(self, room_id, sender, event_type, contains_url):
311 labels = content.get(EventContentFields.LABELS, [])
312
313 return self.check_fields(room_id, sender, ev_type, labels, contains_url)
314
315 def check_fields(self, room_id, sender, event_type, labels, contains_url):
306316 """Checks whether the filter matches the given event fields.
307317
308318 Returns:
312322 "rooms": lambda v: room_id == v,
313323 "senders": lambda v: sender == v,
314324 "types": lambda v: _matches_wildcard(event_type, v),
325 "labels": lambda v: v in labels,
315326 }
316327
317328 for name, match_func in literal_keys.items():
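To make the new fields concrete, here is a hedged example (derived from MSC2326, not taken from this diff) of a filter that keeps messages labelled "#fun" and drops anything labelled "#work":

```
# Hypothetical event filter JSON for /sync or /messages, per MSC2326.
event_filter = {
    "types": ["m.room.message"],
    "org.matrix.labels": ["#fun"],       # keep events carrying this label
    "org.matrix.not_labels": ["#work"],  # drop events carrying this label
}
```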
4343 bind_addresses (list): Addresses on which the service listens.
4444 """
4545 if address == "0.0.0.0" and "::" in bind_addresses:
46 logger.warn("Failed to listen on 0.0.0.0, continuing because listening on [::]")
46 logger.warning(
47 "Failed to listen on 0.0.0.0, continuing because listening on [::]"
48 )
4749 else:
4850 raise e
9393 )
9494 elif listener["type"] == "metrics":
9595 if not self.get_config().enable_metrics:
96 logger.warn(
96 logger.warning(
9797 (
9898 "Metrics listener configured, but "
9999 "enable_metrics is not True!"
102102 else:
103103 _base.listen_metrics(listener["bind_addresses"], listener["port"])
104104 else:
105 logger.warn("Unrecognized listener type: %s", listener["type"])
105 logger.warning("Unrecognized listener type: %s", listener["type"])
106106
107107 self.get_tcp_replication().start_replication(self)
108108
152152 )
153153 elif listener["type"] == "metrics":
154154 if not self.get_config().enable_metrics:
155 logger.warn(
155 logger.warning(
156156 (
157157 "Metrics listener configured, but "
158158 "enable_metrics is not True!"
161161 else:
162162 _base.listen_metrics(listener["bind_addresses"], listener["port"])
163163 else:
164 logger.warn("Unrecognized listener type: %s", listener["type"])
164 logger.warning("Unrecognized listener type: %s", listener["type"])
165165
166166 self.get_tcp_replication().start_replication(self)
167167
146146 )
147147 elif listener["type"] == "metrics":
148148 if not self.get_config().enable_metrics:
149 logger.warn(
149 logger.warning(
150150 (
151151 "Metrics listener configured, but "
152152 "enable_metrics is not True!"
155155 else:
156156 _base.listen_metrics(listener["bind_addresses"], listener["port"])
157157 else:
158 logger.warn("Unrecognized listener type: %s", listener["type"])
158 logger.warning("Unrecognized listener type: %s", listener["type"])
159159
160160 self.get_tcp_replication().start_replication(self)
161161
131131 )
132132 elif listener["type"] == "metrics":
133133 if not self.get_config().enable_metrics:
134 logger.warn(
134 logger.warning(
135135 (
136136 "Metrics listener configured, but "
137137 "enable_metrics is not True!"
140140 else:
141141 _base.listen_metrics(listener["bind_addresses"], listener["port"])
142142 else:
143 logger.warn("Unrecognized listener type: %s", listener["type"])
143 logger.warning("Unrecognized listener type: %s", listener["type"])
144144
145145 self.get_tcp_replication().start_replication(self)
146146
122122 )
123123 elif listener["type"] == "metrics":
124124 if not self.get_config().enable_metrics:
125 logger.warn(
125 logger.warning(
126126 (
127127 "Metrics listener configured, but "
128128 "enable_metrics is not True!"
131131 else:
132132 _base.listen_metrics(listener["bind_addresses"], listener["port"])
133133 else:
134 logger.warn("Unrecognized listener type: %s", listener["type"])
134 logger.warning("Unrecognized listener type: %s", listener["type"])
135135
136136 self.get_tcp_replication().start_replication(self)
137137
203203 )
204204 elif listener["type"] == "metrics":
205205 if not self.get_config().enable_metrics:
206 logger.warn(
206 logger.warning(
207207 (
208208 "Metrics listener configured, but "
209209 "enable_metrics is not True!"
212212 else:
213213 _base.listen_metrics(listener["bind_addresses"], listener["port"])
214214 else:
215 logger.warn("Unrecognized listener type: %s", listener["type"])
215 logger.warning("Unrecognized listener type: %s", listener["type"])
216216
217217 self.get_tcp_replication().start_replication(self)
218218
1818
1919 import gc
2020 import logging
21 import math
2122 import os
23 import resource
2224 import sys
2325
2426 from six import iteritems
2527
26 import psutil
2728 from prometheus_client import Gauge
2829
2930 from twisted.application import service
281282 reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
282283 elif listener["type"] == "metrics":
283284 if not self.get_config().enable_metrics:
284 logger.warn(
285 logger.warning(
285286 (
286287 "Metrics listener configured, but "
287288 "enable_metrics is not True!"
290291 else:
291292 _base.listen_metrics(listener["bind_addresses"], listener["port"])
292293 else:
293 logger.warn("Unrecognized listener type: %s", listener["type"])
294 logger.warning("Unrecognized listener type: %s", listener["type"])
294295
295296 def run_startup_checks(self, db_conn, database_engine):
296297 all_users_native = are_all_users_on_domain(
470471 return self._port.stopListening()
471472
472473
474 # Contains the list of processes we will be monitoring
475 # currently either 0 or 1
476 _stats_process = []
477
478
479 @defer.inlineCallbacks
480 def phone_stats_home(hs, stats, stats_process=_stats_process):
481 logger.info("Gathering stats for reporting")
482 now = int(hs.get_clock().time())
483 uptime = int(now - hs.start_time)
484 if uptime < 0:
485 uptime = 0
486
487 stats["homeserver"] = hs.config.server_name
488 stats["server_context"] = hs.config.server_context
489 stats["timestamp"] = now
490 stats["uptime_seconds"] = uptime
491 version = sys.version_info
492 stats["python_version"] = "{}.{}.{}".format(
493 version.major, version.minor, version.micro
494 )
495 stats["total_users"] = yield hs.get_datastore().count_all_users()
496
497 total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
498 stats["total_nonbridged_users"] = total_nonbridged_users
499
500 daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
501 for name, count in iteritems(daily_user_type_results):
502 stats["daily_user_type_" + name] = count
503
504 room_count = yield hs.get_datastore().get_room_count()
505 stats["total_room_count"] = room_count
506
507 stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
508 stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
509 stats["daily_active_rooms"] = yield hs.get_datastore().count_daily_active_rooms()
510 stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
511
512 r30_results = yield hs.get_datastore().count_r30_users()
513 for name, count in iteritems(r30_results):
514 stats["r30_users_" + name] = count
515
516 daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
517 stats["daily_sent_messages"] = daily_sent_messages
518 stats["cache_factor"] = CACHE_SIZE_FACTOR
519 stats["event_cache_size"] = hs.config.event_cache_size
520
521 #
522 # Performance statistics
523 #
524 old = stats_process[0]
525 new = (now, resource.getrusage(resource.RUSAGE_SELF))
526 stats_process[0] = new
527
528 # Get RSS in bytes
529 stats["memory_rss"] = new[1].ru_maxrss
530
531 # Get CPU time in % of a single core, not % of all cores
532 used_cpu_time = (new[1].ru_utime + new[1].ru_stime) - (
533 old[1].ru_utime + old[1].ru_stime
534 )
535 if used_cpu_time == 0 or new[0] == old[0]:
536 stats["cpu_average"] = 0
537 else:
538 stats["cpu_average"] = math.floor(used_cpu_time / (new[0] - old[0]) * 100)
539
540 #
541 # Database version
542 #
543
544 stats["database_engine"] = hs.get_datastore().database_engine_name
545 stats["database_server_version"] = hs.get_datastore().get_server_version()
546 logger.info("Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats))
547 try:
548 yield hs.get_proxied_http_client().put_json(
549 hs.config.report_stats_endpoint, stats
550 )
551 except Exception as e:
552 logger.warning("Error reporting stats: %s", e)
553
554
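A standalone sketch of the stdlib measurement used above; one caveat worth knowing is that the units of `ru_maxrss` are platform-dependent (kilobytes on Linux, bytes on macOS):

```
import resource

usage = resource.getrusage(resource.RUSAGE_SELF)
peak_rss = usage.ru_maxrss                      # KB on Linux, bytes on macOS
cpu_seconds = usage.ru_utime + usage.ru_stime   # user + system CPU time, seconds
print(peak_rss, cpu_seconds)
```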
473555 def run(hs):
474556 PROFILE_SYNAPSE = False
475557 if PROFILE_SYNAPSE:
496578 reactor.run = profile(reactor.run)
497579
498580 clock = hs.get_clock()
499 start_time = clock.time()
500581
501582 stats = {}
502583
503 # Contains the list of processes we will be monitoring
504 # currently either 0 or 1
505 stats_process = []
584 def performance_stats_init():
585 _stats_process.clear()
586 _stats_process.append(
587 (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF))
588 )
506589
507590 def start_phone_stats_home():
508 return run_as_background_process("phone_stats_home", phone_stats_home)
509
510 @defer.inlineCallbacks
511 def phone_stats_home():
512 logger.info("Gathering stats for reporting")
513 now = int(hs.get_clock().time())
514 uptime = int(now - start_time)
515 if uptime < 0:
516 uptime = 0
517
518 stats["homeserver"] = hs.config.server_name
519 stats["server_context"] = hs.config.server_context
520 stats["timestamp"] = now
521 stats["uptime_seconds"] = uptime
522 version = sys.version_info
523 stats["python_version"] = "{}.{}.{}".format(
524 version.major, version.minor, version.micro
525 )
526 stats["total_users"] = yield hs.get_datastore().count_all_users()
527
528 total_nonbridged_users = yield hs.get_datastore().count_nonbridged_users()
529 stats["total_nonbridged_users"] = total_nonbridged_users
530
531 daily_user_type_results = yield hs.get_datastore().count_daily_user_type()
532 for name, count in iteritems(daily_user_type_results):
533 stats["daily_user_type_" + name] = count
534
535 room_count = yield hs.get_datastore().get_room_count()
536 stats["total_room_count"] = room_count
537
538 stats["daily_active_users"] = yield hs.get_datastore().count_daily_users()
539 stats["monthly_active_users"] = yield hs.get_datastore().count_monthly_users()
540 stats[
541 "daily_active_rooms"
542 ] = yield hs.get_datastore().count_daily_active_rooms()
543 stats["daily_messages"] = yield hs.get_datastore().count_daily_messages()
544
545 r30_results = yield hs.get_datastore().count_r30_users()
546 for name, count in iteritems(r30_results):
547 stats["r30_users_" + name] = count
548
549 daily_sent_messages = yield hs.get_datastore().count_daily_sent_messages()
550 stats["daily_sent_messages"] = daily_sent_messages
551 stats["cache_factor"] = CACHE_SIZE_FACTOR
552 stats["event_cache_size"] = hs.config.event_cache_size
553
554 if len(stats_process) > 0:
555 stats["memory_rss"] = 0
556 stats["cpu_average"] = 0
557 for process in stats_process:
558 stats["memory_rss"] += process.memory_info().rss
559 stats["cpu_average"] += int(process.cpu_percent(interval=None))
560
561 stats["database_engine"] = hs.get_datastore().database_engine_name
562 stats["database_server_version"] = hs.get_datastore().get_server_version()
563 logger.info(
564 "Reporting stats to %s: %s" % (hs.config.report_stats_endpoint, stats)
565 )
566 try:
567 yield hs.get_simple_http_client().put_json(
568 hs.config.report_stats_endpoint, stats
569 )
570 except Exception as e:
571 logger.warn("Error reporting stats: %s", e)
572
573 def performance_stats_init():
574 try:
575 process = psutil.Process()
576 # Ensure we can fetch both, and make the initial request for cpu_percent
577 # so the next request will use this as the initial point.
578 process.memory_info().rss
579 process.cpu_percent(interval=None)
580 logger.info("report_stats can use psutil")
581 stats_process.append(process)
582 except (AttributeError):
583 logger.warning("Unable to read memory/cpu stats. Disabling reporting.")
591 return run_as_background_process(
592 "phone_stats_home", phone_stats_home, hs, stats
593 )
584594
585595 def generate_user_daily_visit_stats():
586596 return run_as_background_process(
119119 )
120120 elif listener["type"] == "metrics":
121121 if not self.get_config().enable_metrics:
122 logger.warn(
122 logger.warning(
123123 (
124124 "Metrics listener configured, but "
125125 "enable_metrics is not True!"
128128 else:
129129 _base.listen_metrics(listener["bind_addresses"], listener["port"])
130130 else:
131 logger.warn("Unrecognized listener type: %s", listener["type"])
131 logger.warning("Unrecognized listener type: %s", listener["type"])
132132
133133 self.get_tcp_replication().start_replication(self)
134134
113113 )
114114 elif listener["type"] == "metrics":
115115 if not self.get_config().enable_metrics:
116 logger.warn(
116 logger.warning(
117117 (
118118 "Metrics listener configured, but "
119119 "enable_metrics is not True!"
122122 else:
123123 _base.listen_metrics(listener["bind_addresses"], listener["port"])
124124 else:
125 logger.warn("Unrecognized listener type: %s", listener["type"])
125 logger.warning("Unrecognized listener type: %s", listener["type"])
126126
127127 self.get_tcp_replication().start_replication(self)
128128
325325 )
326326 elif listener["type"] == "metrics":
327327 if not self.get_config().enable_metrics:
328 logger.warn(
328 logger.warning(
329329 (
330330 "Metrics listener configured, but "
331331 "enable_metrics is not True!"
334334 else:
335335 _base.listen_metrics(listener["bind_addresses"], listener["port"])
336336 else:
337 logger.warn("Unrecognized listener type: %s", listener["type"])
337 logger.warning("Unrecognized listener type: %s", listener["type"])
338338
339339 self.get_tcp_replication().start_replication(self)
340340
149149 )
150150 elif listener["type"] == "metrics":
151151 if not self.get_config().enable_metrics:
152 logger.warn(
152 logger.warning(
153153 (
154154 "Metrics listener configured, but "
155155 "enable_metrics is not True!"
158158 else:
159159 _base.listen_metrics(listener["bind_addresses"], listener["port"])
160160 else:
161 logger.warn("Unrecognized listener type: %s", listener["type"])
161 logger.warning("Unrecognized listener type: %s", listener["type"])
162162
163163 self.get_tcp_replication().start_replication(self)
164164
9393 ip_range_whitelist=None,
9494 ):
9595 self.token = token
96 self.url = url
96 self.url = (
97 url.rstrip("/") if isinstance(url, str) else None
98 ) # url must not end with a slash
9799 self.hs_token = hs_token
98100 self.sender = sender
99101 self.server_name = hostname
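A quick illustration of the normalisation above (the URL and path are hypothetical): without the `rstrip`, a registered url ending in "/" produced requests with a doubled slash.

```
url = "http://appservice.example.com/"        # hypothetical registered url
base = url.rstrip("/") if isinstance(url, str) else None
request = base + "/transactions/123"          # no "//" in the result
```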
3434 ## Captcha ##
3535 # See docs/CAPTCHA_SETUP for full details of configuring this.
3636
37 # This Home Server's ReCAPTCHA public key.
37 # This homeserver's ReCAPTCHA public key.
3838 #
3939 #recaptcha_public_key: "YOUR_PUBLIC_KEY"
4040
41 # This Home Server's ReCAPTCHA private key.
41 # This homeserver's ReCAPTCHA private key.
4242 #
4343 #recaptcha_private_key: "YOUR_PRIVATE_KEY"
4444
304304 # smtp_user: "exampleusername"
305305 # smtp_pass: "examplepassword"
306306 # require_transport_security: false
307 # notif_from: "Your Friendly %(app)s Home Server <noreply@example.com>"
307 # notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
308308 # app_name: Matrix
309309 #
310310 # # Enable email notifications by default
124124
125125 # if neither trusted_key_servers nor perspectives are given, use the default.
126126 if "perspectives" not in config and "trusted_key_servers" not in config:
127 logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
127 logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
128128 key_servers = [{"server_name": "matrix.org"}]
129129 else:
130130 key_servers = config.get("trusted_key_servers", [])
155155 if not self.macaroon_secret_key:
156156 # Unfortunately, there are people out there that don't have this
157157 # set. Lets just be "nice" and derive one from their secret key.
158 logger.warn("Config is missing macaroon_secret_key")
158 logger.warning("Config is missing macaroon_secret_key")
159159 seed = bytes(self.signing_key[0])
160160 self.macaroon_secret_key = hashlib.sha256(seed).digest()
161161
181181 logger = logging.getLogger("")
182182
183183 if not log_config:
184 logger.warn("Reloaded a blank config?")
184 logger.warning("Reloaded a blank config?")
185185
186186 logging.config.dictConfig(log_config)
187187
233233
234234 # make sure that the first thing we log is a thing we can grep backwards
235235 # for
236 logging.warn("***** STARTING SERVER *****")
237 logging.warn("Server %s version %s", sys.argv[0], get_version_string(synapse))
236 logging.warning("***** STARTING SERVER *****")
237 logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
238238 logging.info("Server hostname: %s", config.server_name)
239239
240240 return logger
299299 # If a delegate is specified, the config option public_baseurl must also be filled out.
300300 #
301301 account_threepid_delegates:
302 #email: https://example.com # Delegate email sending to example.org
302 #email: https://example.com # Delegate email sending to example.com
303303 #msisdn: http://localhost:8090 # Delegate SMS sending to this local process
304304
305305 # Users who register on this homeserver will automatically be joined
4040 # in the list.
4141 DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
4242
43 DEFAULT_ROOM_VERSION = "4"
43 DEFAULT_ROOM_VERSION = "5"
4444
4545 ROOM_COMPLEXITY_TOO_GREAT = (
4646 "Your homeserver is unable to join rooms this large or complex. "
720720 # Used by phonehome stats to group together related servers.
721721 #server_context: context
722722
723 # Resource-constrained Homeserver Settings
723 # Resource-constrained homeserver settings
724724 #
725725 # If limit_remote_rooms.enabled is True, the room complexity will be
726726 # checked before a user joins a new remote room. If it is above
780780 "--daemonize",
781781 action="store_true",
782782 default=None,
783 help="Daemonize the home server",
783 help="Daemonize the homeserver",
784784 )
785785 server_group.add_argument(
786786 "--print-pidfile",
124124 redact_json = prune_event_dict(event_dict)
125125 redact_json.pop("age_ts", None)
126126 redact_json.pop("unsigned", None)
127 logger.debug("Signing event: %s", encode_canonical_json(redact_json))
127 if logger.isEnabledFor(logging.DEBUG):
128 logger.debug("Signing event: %s", encode_canonical_json(redact_json))
128129 redact_json = sign_json(redact_json, signature_name, signing_key)
129 logger.debug("Signed event: %s", encode_canonical_json(redact_json))
130 if logger.isEnabledFor(logging.DEBUG):
131 logger.debug("Signed event: %s", encode_canonical_json(redact_json))
130132 return redact_json["signatures"]
131133
132134
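This is the standard guard for expensive log arguments: `%`-style formatting is already lazy, but the argument itself (here a full canonical-JSON encode) must be computed before the call, so `isEnabledFor` skips that work when DEBUG logging is off. A generic sketch with illustrative names:

```
import logging

logger = logging.getLogger(__name__)

def expensive_render(obj):
    # Stand-in for a costly serialisation such as encode_canonical_json.
    return repr(obj)

if logger.isEnabledFor(logging.DEBUG):
    logger.debug("payload: %s", expensive_render({"a": 1}))
```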
7676 if auth_events is None:
7777 # Oh, we don't know what the state of the room was, so we
7878 # are trusting that this is allowed (at least for now)
79 logger.warn("Trusting event: %s", event.event_id)
79 logger.warning("Trusting event: %s", event.event_id)
8080 return
8181
8282 if event.type == EventTypes.Create:
1111 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
14 from typing import Dict, Optional, Tuple, Union
1415
1516 from six import iteritems
1617
18 import attr
1719 from frozendict import frozendict
1820
1921 from twisted.internet import defer
2022
23 from synapse.appservice import ApplicationService
2124 from synapse.logging.context import make_deferred_yieldable, run_in_background
2225
2326
24 class EventContext(object):
25 """
27 @attr.s(slots=True)
28 class EventContext:
29 """
30 Holds information relevant to persisting an event
31
2632 Attributes:
27 state_group (int|None): state group id, if the state has been stored
28 as a state group. This is usually only None if e.g. the event is
29 an outlier.
30 rejected (bool|str): A rejection reason if the event was rejected, else
31 False
32
33 push_actions (list[(str, list[object])]): list of (user_id, actions)
34 tuples
35
36 prev_group (int): Previously persisted state group. ``None`` for an
37 outlier.
38 delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
39 (type, state_key) -> event_id. ``None`` for an outlier.
40
41 prev_state_events (?): XXX: is this ever set to anything other than
42 the empty list?
43
44 _current_state_ids (dict[(str, str), str]|None):
45 The current state map including the current event. None if outlier
46 or we haven't fetched the state from DB yet.
33 rejected: A rejection reason if the event was rejected, else False
34
35 _state_group: The ID of the state group for this event. Note that state events
36 are persisted with a state group which includes the new event, so this is
37 effectively the state *after* the event in question.
38
39 For a *rejected* state event, where the state of the rejected event is
40 ignored, this state_group should never make it into the
41 event_to_state_groups table. Indeed, inspecting this value for a rejected
42 state event is almost certainly incorrect.
43
44 For an outlier, where we don't have the state at the event, this will be
45 None.
46
47 Note that this is a private attribute: it should be accessed via
48 the ``state_group`` property.
49
50 state_group_before_event: The ID of the state group representing the state
51 of the room before this event.
52
53 If this is a non-state event, this will be the same as ``state_group``. If
54 it's a state event, it will be the same as ``prev_group``.
55
56 If ``state_group`` is None (ie, the event is an outlier),
57 ``state_group_before_event`` will always also be ``None``.
58
59 prev_group: If it is known, ``state_group``'s prev_group. Note that this being
60 None does not necessarily mean that ``state_group`` does not have
61 a prev_group!
62
63 If the event is a state event, this is normally the same as ``state_group_before_event``.
64
65 If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
66 will always also be ``None``.
67
68 Note that this is *not* (necessarily) the state group associated with
69 ``_prev_state_ids``.
70
71 delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
72 and ``state_group``.
73
74 app_service: If this event is being sent by a (local) application service, that
75 app service.
76
77 _current_state_ids: The room state map, including this event - ie, the state
78 in ``state_group``.
79
4780 (type, state_key) -> event_id
4881
49 _prev_state_ids (dict[(str, str), str]|None):
50 The current state map excluding the current event. None if outlier
51 or we haven't fetched the state from DB yet.
82 FIXME: what is this for an outlier? it seems ill-defined. It seems like
83 it could be either {}, or the state we were given by the remote
84 server, depending on $THINGS
85
86 Note that this is a private attribute: it should be accessed via
87 ``get_current_state_ids``. _AsyncEventContext impl calculates this
88 on-demand: it will be None until that happens.
89
90 _prev_state_ids: The room state map, excluding this event - ie, the state
91 in ``state_group_before_event``. For a non-state
92 event, this will be the same as ``_current_state_ids``.
93
94 Note that it is a completely different thing to prev_group!
95
5296 (type, state_key) -> event_id
5397
54 _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
55 been calculated. None if we haven't started calculating yet
56
57 _event_type (str): The type of the event the context is associated with.
58 Only set when state has not been fetched yet.
59
60 _event_state_key (str|None): The state_key of the event the context is
61 associated with. Only set when state has not been fetched yet.
62
63 _prev_state_id (str|None): If the event associated with the context is
64 a state event, then `_prev_state_id` is the event_id of the state
65 that was replaced.
66 Only set when state has not been fetched yet.
67 """
68
69 __slots__ = [
70 "state_group",
71 "rejected",
72 "prev_group",
73 "delta_ids",
74 "prev_state_events",
75 "app_service",
76 "_current_state_ids",
77 "_prev_state_ids",
78 "_prev_state_id",
79 "_event_type",
80 "_event_state_key",
81 "_fetching_state_deferred",
82 ]
83
84 def __init__(self):
85 self.prev_state_events = []
86 self.rejected = False
87 self.app_service = None
98 FIXME: again, what is this for an outlier?
99
100 As with _current_state_ids, this is a private attribute. It should be
101 accessed via get_prev_state_ids.
102 """
103
104 rejected = attr.ib(default=False, type=Union[bool, str])
105 _state_group = attr.ib(default=None, type=Optional[int])
106 state_group_before_event = attr.ib(default=None, type=Optional[int])
107 prev_group = attr.ib(default=None, type=Optional[int])
108 delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
109 app_service = attr.ib(default=None, type=Optional[ApplicationService])
110
111 _current_state_ids = attr.ib(
112 default=None, type=Optional[Dict[Tuple[str, str], str]]
113 )
114 _prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]])
88115
89116 @staticmethod
90117 def with_state(
91 state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None
118 state_group,
119 state_group_before_event,
120 current_state_ids,
121 prev_state_ids,
122 prev_group=None,
123 delta_ids=None,
92124 ):
93 context = EventContext()
94
95 # The current state including the current event
96 context._current_state_ids = current_state_ids
97 # The current state excluding the current event
98 context._prev_state_ids = prev_state_ids
99 context.state_group = state_group
100
101 context._prev_state_id = None
102 context._event_type = None
103 context._event_state_key = None
104 context._fetching_state_deferred = defer.succeed(None)
105
106 # A previously persisted state group and a delta between that
107 # and this state.
108 context.prev_group = prev_group
109 context.delta_ids = delta_ids
110
111 return context
125 return EventContext(
126 current_state_ids=current_state_ids,
127 prev_state_ids=prev_state_ids,
128 state_group=state_group,
129 state_group_before_event=state_group_before_event,
130 prev_group=prev_group,
131 delta_ids=delta_ids,
132 )
112133
113134 @defer.inlineCallbacks
114135 def serialize(self, event, store):
136157 "prev_state_id": prev_state_id,
137158 "event_type": event.type,
138159 "event_state_key": event.state_key if event.is_state() else None,
139 "state_group": self.state_group,
160 "state_group": self._state_group,
161 "state_group_before_event": self.state_group_before_event,
140162 "rejected": self.rejected,
141163 "prev_group": self.prev_group,
142164 "delta_ids": _encode_state_dict(self.delta_ids),
143 "prev_state_events": self.prev_state_events,
144165 "app_service_id": self.app_service.id if self.app_service else None,
145166 }
146167
156177 Returns:
157178 EventContext
158179 """
159 context = EventContext()
160
161 # We use the state_group and prev_state_id stuff to pull the
162 # current_state_ids out of the DB and construct prev_state_ids.
163 context._prev_state_id = input["prev_state_id"]
164 context._event_type = input["event_type"]
165 context._event_state_key = input["event_state_key"]
166
167 context._current_state_ids = None
168 context._prev_state_ids = None
169 context._fetching_state_deferred = None
170
171 context.state_group = input["state_group"]
172 context.prev_group = input["prev_group"]
173 context.delta_ids = _decode_state_dict(input["delta_ids"])
174
175 context.rejected = input["rejected"]
176 context.prev_state_events = input["prev_state_events"]
180 context = _AsyncEventContextImpl(
181 # We use the state_group and prev_state_id stuff to pull the
182 # current_state_ids out of the DB and construct prev_state_ids.
183 prev_state_id=input["prev_state_id"],
184 event_type=input["event_type"],
185 event_state_key=input["event_state_key"],
186 state_group=input["state_group"],
187 state_group_before_event=input["state_group_before_event"],
188 prev_group=input["prev_group"],
189 delta_ids=_decode_state_dict(input["delta_ids"]),
190 rejected=input["rejected"],
191 )
177192
178193 app_service_id = input["app_service_id"]
179194 if app_service_id:
181196
182197 return context
183198
199 @property
200 def state_group(self) -> Optional[int]:
201 """The ID of the state group for this event.
202
203 Note that state events are persisted with a state group which includes the new
204 event, so this is effectively the state *after* the event in question.
205
206 For an outlier, where we don't have the state at the event, this will be None.
207
208 It is an error to access this for a rejected event, since rejected state should
209 not make it into the room state. Accessing this property will raise an exception
210 if ``rejected`` is set.
211 """
212 if self.rejected:
213 raise RuntimeError("Attempt to access state_group of rejected event")
214
215 return self._state_group
216
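To pin down the relationship the docstrings above describe: for a state event, the map in ``state_group`` is the map in ``state_group_before_event`` with the event's own (type, state_key) entry overwritten; for a non-state event the two maps are identical. A toy illustration of that rule only (the helper and event ids are invented, not Synapse API):

    def state_after_event(state_before, event_type, state_key, event_id):
        """Return the state map including the event, given the map excluding it."""
        if state_key is None:
            # Not a state event: the room state is unchanged.
            return dict(state_before)
        after = dict(state_before)
        after[(event_type, state_key)] = event_id
        return after

    before = {("m.room.name", ""): "$old_name_event"}
    after = state_after_event(before, "m.room.name", "", "$new_name_event")
    assert after[("m.room.name", "")] == "$new_name_event"
    assert state_after_event(before, "m.room.message", None, "$msg") == before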
184217 @defer.inlineCallbacks
185218 def get_current_state_ids(self, store):
186 """Gets the current state IDs
219 """
220 Gets the room state map, including this event - ie, the state in ``state_group``
221
222 It is an error to access this for a rejected event, since rejected state should
223 not make it into the room state. This method will raise an exception if
224 ``rejected`` is set.
225
226 Returns:
227 Deferred[dict[(str, str), str]|None]: Returns None if state_group
228 is None, which happens when the associated event is an outlier.
229
230 Maps a (type, state_key) to the event ID of the state event matching
231 this tuple.
232 """
233 if self.rejected:
234 raise RuntimeError("Attempt to access state_ids of rejected event")
235
236 yield self._ensure_fetched(store)
237 return self._current_state_ids
238
239 @defer.inlineCallbacks
240 def get_prev_state_ids(self, store):
241 """
242 Gets the room state map, excluding this event.
243
244 For a non-state event, this will be the same as get_current_state_ids().
187245
188246 Returns:
189247 Deferred[dict[(str, str), str]|None]: Returns None if state_group
191249 Maps a (type, state_key) to the event ID of the state event matching
192250 this tuple.
193251 """
194
252 yield self._ensure_fetched(store)
253 return self._prev_state_ids
254
255 def get_cached_current_state_ids(self):
256 """Gets the current state IDs if we have them already cached.
257
258 It is an error to access this for a rejected event, since rejected state should
259 not make it into the room state. This method will raise an exception if
260 ``rejected`` is set.
261
262 Returns:
263 dict[(str, str), str]|None: Returns None if we haven't cached the
264 state or if state_group is None, which happens when the associated
265 event is an outlier.
266 """
267 if self.rejected:
268 raise RuntimeError("Attempt to access state_ids of rejected event")
269
270 return self._current_state_ids
271
272 def _ensure_fetched(self, store):
273 return defer.succeed(None)
274
275
276 @attr.s(slots=True)
277 class _AsyncEventContextImpl(EventContext):
278 """
279 An implementation of EventContext which fetches _current_state_ids and
280 _prev_state_ids from the database on demand.
281
282 Attributes:
283
284 _fetching_state_deferred (Deferred|None): Resolves when *_state_ids have
285 been calculated. None if we haven't started calculating yet
286
287 _event_type (str): The type of the event the context is associated with.
288
289 _event_state_key (str): The state_key of the event the context is
290 associated with.
291
292 _prev_state_id (str|None): If the event associated with the context is
293 a state event, then `_prev_state_id` is the event_id of the state
294 that was replaced.
295 """
296
297 _prev_state_id = attr.ib(default=None)
298 _event_type = attr.ib(default=None)
299 _event_state_key = attr.ib(default=None)
300 _fetching_state_deferred = attr.ib(default=None)
301
302 def _ensure_fetched(self, store):
195303 if not self._fetching_state_deferred:
196304 self._fetching_state_deferred = run_in_background(
197305 self._fill_out_state, store
198306 )
199307
200 yield make_deferred_yieldable(self._fetching_state_deferred)
201
202 return self._current_state_ids
203
204 @defer.inlineCallbacks
205 def get_prev_state_ids(self, store):
206 """Gets the prev state IDs
207
208 Returns:
209 Deferred[dict[(str, str), str]|None]: Returns None if state_group
210 is None, which happens when the associated event is an outlier.
211 Maps a (type, state_key) to the event ID of the state event matching
212 this tuple.
213 """
214
215 if not self._fetching_state_deferred:
216 self._fetching_state_deferred = run_in_background(
217 self._fill_out_state, store
218 )
219
220 yield make_deferred_yieldable(self._fetching_state_deferred)
221
222 return self._prev_state_ids
223
224 def get_cached_current_state_ids(self):
225 """Gets the current state IDs if we have them already cached.
226
227 Returns:
228 dict[(str, str), str]|None: Returns None if we haven't cached the
229 state or if state_group is None, which happens when the associated
230 event is an outlier.
231 """
232
233 return self._current_state_ids
308 return make_deferred_yieldable(self._fetching_state_deferred)
234309
235310 @defer.inlineCallbacks
236311 def _fill_out_state(self, store):
249324 else:
250325 self._prev_state_ids = self._current_state_ids
251326
252 @defer.inlineCallbacks
253 def update_state(
254 self, state_group, prev_state_ids, current_state_ids, prev_group, delta_ids
255 ):
256 """Replace the state in the context
257 """
258
259 # We need to make sure we wait for any ongoing fetching of state
260 # to complete so that the updated state doesn't get clobbered
261 if self._fetching_state_deferred:
262 yield make_deferred_yieldable(self._fetching_state_deferred)
263
264 self.state_group = state_group
265 self._prev_state_ids = prev_state_ids
266 self.prev_group = prev_group
267 self._current_state_ids = current_state_ids
268 self.delta_ids = delta_ids
269
270 # We need to ensure that we've marked the state as having been fetched
271 self._fetching_state_deferred = defer.succeed(None)
272
273327
274328 def _encode_state_dict(state_dict):
275329 """Since dicts of (type, state_key) -> event_id cannot be serialized in
00 # -*- coding: utf-8 -*-
11 # Copyright 2017 New Vector Ltd
2 # Copyright 2019 The Matrix.org Foundation C.I.C.
23 #
34 # Licensed under the Apache License, Version 2.0 (the "License");
45 # you may not use this file except in compliance with the License.
1112 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1213 # See the License for the specific language governing permissions and
1314 # limitations under the License.
15
16 import inspect
17
18 from synapse.spam_checker_api import SpamCheckerApi
1419
1520
1621 class SpamChecker(object):
2530 pass
2631
2732 if module is not None:
28 self.spam_checker = module(config=config)
33 # Older spam checkers don't accept the `api` argument, so we
34 # try and detect support.
35 spam_args = inspect.getfullargspec(module)
36 if "api" in spam_args.args:
37 api = SpamCheckerApi(hs)
38 self.spam_checker = module(config=config, api=api)
39 else:
40 self.spam_checker = module(config=config)
2941
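For module authors, the upshot of the introspection above is that a spam checker opts into the richer interface simply by declaring an ``api`` parameter; older checkers keep working unchanged. A sketch with two hypothetical checker classes (inspect.getfullargspec on a class reports its __init__ arguments, which is what the loader relies on):

    import inspect

    class ModernChecker(object):
        """Hypothetical checker written against the new convention."""
        def __init__(self, config, api):
            self.config = config
            self.api = api  # would be a SpamCheckerApi instance

    class LegacyChecker(object):
        """Hypothetical pre-``api`` checker; constructed with config only."""
        def __init__(self, config):
            self.config = config

    # The loader can tell the two styles apart without any version flag:
    assert "api" in inspect.getfullargspec(ModernChecker).args
    assert "api" not in inspect.getfullargspec(LegacyChecker).args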
3042 def check_event_for_spam(self, event):
3143 """Checks if a given event is considered "spammy" by this server.
101101 pass
102102
103103 if not res:
104 logger.warn(
104 logger.warning(
105105 "Failed to find copy of %s with valid signature", pdu.event_id
106106 )
107107
172172 return redacted_event
173173
174174 if self.spam_checker.check_event_for_spam(pdu):
175 logger.warn(
175 logger.warning(
176176 "Event contains spam, redacting %s: %s",
177177 pdu.event_id,
178178 pdu.get_pdu_json(),
184184 def errback(failure, pdu):
185185 failure.trap(SynapseError)
186186 with PreserveLoggingContext(ctx):
187 logger.warn(
187 logger.warning(
188188 "Signature check failed for %s: %s",
189189 pdu.event_id,
190190 failure.getErrorMessage(),
176176 given destination server.
177177
178178 Args:
179 dest (str): The remote home server to ask.
179 dest (str): The remote homeserver to ask.
180180 room_id (str): The room_id to backfill.
181181 limit (int): The maximum number of PDUs to return.
182182 extremities (list): List of PDU id and origins of the first pdus
195195 dest, room_id, extremities, limit
196196 )
197197
198 logger.debug("backfill transaction_data=%s", repr(transaction_data))
198 logger.debug("backfill transaction_data=%r", transaction_data)
199199
200200 room_version = yield self.store.get_room_version(room_id)
201201 format_ver = room_version_to_event_format(room_version)
226226 one succeeds.
227227
228228 Args:
229 destinations (list): Which home servers to query
229 destinations (list): Which homeservers to query
230230 event_id (str): event to fetch
231231 room_version (str): version of the room
232232 outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if
311311 @defer.inlineCallbacks
312312 @log_function
313313 def get_state_for_room(self, destination, room_id, event_id):
314 """Requests all of the room state at a given event from a remote home server.
314 """Requests all of the room state at a given event from a remote homeserver.
315315
316316 Args:
317317 destination (str): The remote homeserver to query for the state.
521521 res = yield callback(destination)
522522 return res
523523 except InvalidResponseError as e:
524 logger.warn("Failed to %s via %s: %s", description, destination, e)
524 logger.warning("Failed to %s via %s: %s", description, destination, e)
525525 except HttpResponseException as e:
526526 if not 500 <= e.code < 600:
527527 raise e.to_synapse_error()
528528 else:
529 logger.warn(
529 logger.warning(
530530 "Failed to %s via %s: %i %s",
531531 description,
532532 destination,
534534 e.args[0],
535535 )
536536 except Exception:
537 logger.warn("Failed to %s via %s", description, destination, exc_info=1)
537 logger.warning(
538 "Failed to %s via %s", description, destination, exc_info=1
539 )
538540
539541 raise SynapseError(502, "Failed to %s via any server" % (description,))
540542
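The helper above captures a recurring federation pattern: try each candidate destination in turn, log and continue on retryable failures, re-raise client errors, and only give up with a 502 once every server has been tried. A stripped-down synchronous sketch of just that control flow (exception handling simplified to one catch-all):

    import logging

    logger = logging.getLogger(__name__)

    def try_destination_list(description, destinations, callback):
        """Run callback(destination) against each server until one succeeds."""
        for destination in destinations:
            try:
                return callback(destination)
            except Exception as e:
                # The real code re-raises 4xx responses immediately; only
                # 5xx and transport failures fall through to the next server.
                logger.warning("Failed to %s via %s: %s", description, destination, e)
        raise RuntimeError("Failed to %s via any server" % (description,))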
552554 Note that this does not append any events to any graphs.
553555
554556 Args:
555 destinations (str): Candidate homeservers which are probably
557 destinations (Iterable[str]): Candidate homeservers which are probably
556558 participating in the room.
557559 room_id (str): The room in which the event will happen.
558560 user_id (str): The user whose membership is being evented.
2020 from canonicaljson import json
2121 from prometheus_client import Counter
2222
23 from twisted.internet import defer
2423 from twisted.internet.abstract import isIPAddress
2524 from twisted.python import failure
2625
8584 # come in waves.
8685 self._state_resp_cache = ResponseCache(hs, "state_resp", timeout_ms=30000)
8786
88 @defer.inlineCallbacks
89 @log_function
90 def on_backfill_request(self, origin, room_id, versions, limit):
91 with (yield self._server_linearizer.queue((origin, room_id))):
87 async def on_backfill_request(self, origin, room_id, versions, limit):
88 with (await self._server_linearizer.queue((origin, room_id))):
9289 origin_host, _ = parse_server_name(origin)
93 yield self.check_server_matches_acl(origin_host, room_id)
94
95 pdus = yield self.handler.on_backfill_request(
90 await self.check_server_matches_acl(origin_host, room_id)
91
92 pdus = await self.handler.on_backfill_request(
9693 origin, room_id, versions, limit
9794 )
9895
10097
10198 return 200, res
10299
103 @defer.inlineCallbacks
104 @log_function
105 def on_incoming_transaction(self, origin, transaction_data):
100 async def on_incoming_transaction(self, origin, transaction_data):
106101 # keep this as early as possible to make the calculated origin ts as
107102 # accurate as possible.
108103 request_time = self._clock.time_msec()
117112 # use a linearizer to ensure that we don't process the same transaction
118113 # multiple times in parallel.
119114 with (
120 yield self._transaction_linearizer.queue(
115 await self._transaction_linearizer.queue(
121116 (origin, transaction.transaction_id)
122117 )
123118 ):
124 result = yield self._handle_incoming_transaction(
119 result = await self._handle_incoming_transaction(
125120 origin, transaction, request_time
126121 )
127122
128123 return result
129124
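The linearizer keyed on (origin, transaction_id) behaves like a per-key lock: a duplicate copy of a transaction queues behind the first and then finds the cached response that have_responded returns. A minimal asyncio analogue of that keyed-lock idea (Synapse's own Linearizer is more featureful):

    import asyncio
    from collections import defaultdict

    class KeyedLinearizer(object):
        """One asyncio.Lock per key: work for the same key runs in series."""
        def __init__(self):
            self._locks = defaultdict(asyncio.Lock)

        async def run(self, key, coro_fn):
            async with self._locks[key]:
                return await coro_fn()

    async def main():
        linearizer = KeyedLinearizer()
        seen = set()

        async def handle(origin, txn_id):
            async def work():
                if (origin, txn_id) in seen:
                    return "cached"  # the duplicate sees the first copy's result
                seen.add((origin, txn_id))
                await asyncio.sleep(0)  # simulate processing
                return "processed"
            return await linearizer.run((origin, txn_id), work)

        # Two concurrent copies of one transaction: only one is processed.
        results = await asyncio.gather(
            handle("a.example", "t1"), handle("a.example", "t1")
        )
        assert sorted(results) == ["cached", "processed"]

    asyncio.run(main())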
130 @defer.inlineCallbacks
131 def _handle_incoming_transaction(self, origin, transaction, request_time):
125 async def _handle_incoming_transaction(self, origin, transaction, request_time):
132126 """ Process an incoming transaction and return the HTTP response
133127
134128 Args:
139133 Returns:
140134 Deferred[(int, object)]: http response code and body
141135 """
142 response = yield self.transaction_actions.have_responded(origin, transaction)
136 response = await self.transaction_actions.have_responded(origin, transaction)
143137
144138 if response:
145139 logger.debug(
150144
151145 logger.debug("[%s] Transaction is new", transaction.transaction_id)
152146
153 # Reject if PDU count > 50 and EDU count > 100
147 # Reject if PDU count > 50 or EDU count > 100
154148 if len(transaction.pdus) > 50 or (
155149 hasattr(transaction, "edus") and len(transaction.edus) > 100
156150 ):
158152 logger.info("Transaction PDU or EDU count too large. Returning 400")
159153
160154 response = {}
161 yield self.transaction_actions.set_response(
155 await self.transaction_actions.set_response(
162156 origin, transaction, 400, response
163157 )
164158 return 400, response
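The comment fix above matters: the guard rejects when either limit is exceeded, not only when both are. As a plain predicate (the constant names are illustrative; the handler above hard-codes 50 and 100):

    MAX_PDUS = 50
    MAX_EDUS = 100

    def transaction_too_large(pdus, edus):
        # Reject if the PDU count exceeds 50 *or* the EDU count exceeds 100.
        return len(pdus) > MAX_PDUS or len(edus) > MAX_EDUS

    assert transaction_too_large([None] * 51, [])
    assert transaction_too_large([], [None] * 101)
    assert not transaction_too_large([None] * 50, [None] * 100)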
194188 continue
195189
196190 try:
197 room_version = yield self.store.get_room_version(room_id)
191 room_version = await self.store.get_room_version(room_id)
198192 except NotFoundError:
199193 logger.info("Ignoring PDU for unknown room_id: %s", room_id)
200194 continue
220214 # require callouts to other servers to fetch missing events), but
221215 # impose a limit to avoid going too crazy with ram/cpu.
222216
223 @defer.inlineCallbacks
224 def process_pdus_for_room(room_id):
217 async def process_pdus_for_room(room_id):
225218 logger.debug("Processing PDUs for %s", room_id)
226219 try:
227 yield self.check_server_matches_acl(origin_host, room_id)
220 await self.check_server_matches_acl(origin_host, room_id)
228221 except AuthError as e:
229 logger.warn("Ignoring PDUs for room %s from banned server", room_id)
222 logger.warning("Ignoring PDUs for room %s from banned server", room_id)
230223 for pdu in pdus_by_room[room_id]:
231224 event_id = pdu.event_id
232225 pdu_results[event_id] = e.error_dict()
236229 event_id = pdu.event_id
237230 with nested_logging_context(event_id):
238231 try:
239 yield self._handle_received_pdu(origin, pdu)
232 await self._handle_received_pdu(origin, pdu)
240233 pdu_results[event_id] = {}
241234 except FederationError as e:
242 logger.warn("Error handling PDU %s: %s", event_id, e)
235 logger.warning("Error handling PDU %s: %s", event_id, e)
243236 pdu_results[event_id] = {"error": str(e)}
244237 except Exception as e:
245238 f = failure.Failure()
250243 exc_info=(f.type, f.value, f.getTracebackObject()),
251244 )
252245
253 yield concurrently_execute(
246 await concurrently_execute(
254247 process_pdus_for_room, pdus_by_room.keys(), TRANSACTION_CONCURRENCY_LIMIT
255248 )
256249
257250 if hasattr(transaction, "edus"):
258251 for edu in (Edu(**x) for x in transaction.edus):
259 yield self.received_edu(origin, edu.edu_type, edu.content)
252 await self.received_edu(origin, edu.edu_type, edu.content)
260253
261254 response = {"pdus": pdu_results}
262255
263256 logger.debug("Returning: %s", str(response))
264257
265 yield self.transaction_actions.set_response(origin, transaction, 200, response)
258 await self.transaction_actions.set_response(origin, transaction, 200, response)
266259 return 200, response
267260
268 @defer.inlineCallbacks
269 def received_edu(self, origin, edu_type, content):
261 async def received_edu(self, origin, edu_type, content):
270262 received_edus_counter.inc()
271 yield self.registry.on_edu(edu_type, origin, content)
272
273 @defer.inlineCallbacks
274 @log_function
275 def on_context_state_request(self, origin, room_id, event_id):
263 await self.registry.on_edu(edu_type, origin, content)
264
265 async def on_context_state_request(self, origin, room_id, event_id):
276266 if not event_id:
277267 raise NotImplementedError("Specify an event")
278268
279269 origin_host, _ = parse_server_name(origin)
280 yield self.check_server_matches_acl(origin_host, room_id)
281
282 in_room = yield self.auth.check_host_in_room(room_id, origin)
270 await self.check_server_matches_acl(origin_host, room_id)
271
272 in_room = await self.auth.check_host_in_room(room_id, origin)
283273 if not in_room:
284274 raise AuthError(403, "Host not in room.")
285275
288278 # in the cache so we could return it without waiting for the linearizer
289279 # - but that's non-trivial to get right, and anyway somewhat defeats
290280 # the point of the linearizer.
291 with (yield self._server_linearizer.queue((origin, room_id))):
292 resp = yield self._state_resp_cache.wrap(
281 with (await self._server_linearizer.queue((origin, room_id))):
282 resp = await self._state_resp_cache.wrap(
293283 (room_id, event_id),
294284 self._on_context_state_request_compute,
295285 room_id,
298288
299289 return 200, resp
300290
301 @defer.inlineCallbacks
302 def on_state_ids_request(self, origin, room_id, event_id):
291 async def on_state_ids_request(self, origin, room_id, event_id):
303292 if not event_id:
304293 raise NotImplementedError("Specify an event")
305294
306295 origin_host, _ = parse_server_name(origin)
307 yield self.check_server_matches_acl(origin_host, room_id)
308
309 in_room = yield self.auth.check_host_in_room(room_id, origin)
296 await self.check_server_matches_acl(origin_host, room_id)
297
298 in_room = await self.auth.check_host_in_room(room_id, origin)
310299 if not in_room:
311300 raise AuthError(403, "Host not in room.")
312301
313 state_ids = yield self.handler.get_state_ids_for_pdu(room_id, event_id)
314 auth_chain_ids = yield self.store.get_auth_chain_ids(state_ids)
302 state_ids = await self.handler.get_state_ids_for_pdu(room_id, event_id)
303 auth_chain_ids = await self.store.get_auth_chain_ids(state_ids)
315304
316305 return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
317306
318 @defer.inlineCallbacks
319 def _on_context_state_request_compute(self, room_id, event_id):
320 pdus = yield self.handler.get_state_for_pdu(room_id, event_id)
321 auth_chain = yield self.store.get_auth_chain([pdu.event_id for pdu in pdus])
307 async def _on_context_state_request_compute(self, room_id, event_id):
308 pdus = await self.handler.get_state_for_pdu(room_id, event_id)
309 auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
322310
323311 return {
324312 "pdus": [pdu.get_pdu_json() for pdu in pdus],
325313 "auth_chain": [pdu.get_pdu_json() for pdu in auth_chain],
326314 }
327315
328 @defer.inlineCallbacks
329 @log_function
330 def on_pdu_request(self, origin, event_id):
331 pdu = yield self.handler.get_persisted_pdu(origin, event_id)
316 async def on_pdu_request(self, origin, event_id):
317 pdu = await self.handler.get_persisted_pdu(origin, event_id)
332318
333319 if pdu:
334320 return 200, self._transaction_from_pdus([pdu]).get_dict()
335321 else:
336322 return 404, ""
337323
338 @defer.inlineCallbacks
339 def on_query_request(self, query_type, args):
324 async def on_query_request(self, query_type, args):
340325 received_queries_counter.labels(query_type).inc()
341 resp = yield self.registry.on_query(query_type, args)
326 resp = await self.registry.on_query(query_type, args)
342327 return 200, resp
343328
344 @defer.inlineCallbacks
345 def on_make_join_request(self, origin, room_id, user_id, supported_versions):
329 async def on_make_join_request(self, origin, room_id, user_id, supported_versions):
346330 origin_host, _ = parse_server_name(origin)
347 yield self.check_server_matches_acl(origin_host, room_id)
348
349 room_version = yield self.store.get_room_version(room_id)
331 await self.check_server_matches_acl(origin_host, room_id)
332
333 room_version = await self.store.get_room_version(room_id)
350334 if room_version not in supported_versions:
351 logger.warn("Room version %s not in %s", room_version, supported_versions)
335 logger.warning(
336 "Room version %s not in %s", room_version, supported_versions
337 )
352338 raise IncompatibleRoomVersionError(room_version=room_version)
353339
354 pdu = yield self.handler.on_make_join_request(origin, room_id, user_id)
340 pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
355341 time_now = self._clock.time_msec()
356342 return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
357343
358 @defer.inlineCallbacks
359 def on_invite_request(self, origin, content, room_version):
344 async def on_invite_request(self, origin, content, room_version):
360345 if room_version not in KNOWN_ROOM_VERSIONS:
361346 raise SynapseError(
362347 400,
368353
369354 pdu = event_from_pdu_json(content, format_ver)
370355 origin_host, _ = parse_server_name(origin)
371 yield self.check_server_matches_acl(origin_host, pdu.room_id)
372 pdu = yield self._check_sigs_and_hash(room_version, pdu)
373 ret_pdu = yield self.handler.on_invite_request(origin, pdu)
356 await self.check_server_matches_acl(origin_host, pdu.room_id)
357 pdu = await self._check_sigs_and_hash(room_version, pdu)
358 ret_pdu = await self.handler.on_invite_request(origin, pdu)
374359 time_now = self._clock.time_msec()
375360 return {"event": ret_pdu.get_pdu_json(time_now)}
376361
377 @defer.inlineCallbacks
378 def on_send_join_request(self, origin, content, room_id):
362 async def on_send_join_request(self, origin, content, room_id):
379363 logger.debug("on_send_join_request: content: %s", content)
380364
381 room_version = yield self.store.get_room_version(room_id)
365 room_version = await self.store.get_room_version(room_id)
382366 format_ver = room_version_to_event_format(room_version)
383367 pdu = event_from_pdu_json(content, format_ver)
384368
385369 origin_host, _ = parse_server_name(origin)
386 yield self.check_server_matches_acl(origin_host, pdu.room_id)
370 await self.check_server_matches_acl(origin_host, pdu.room_id)
387371
388372 logger.debug("on_send_join_request: pdu sigs: %s", pdu.signatures)
389373
390 pdu = yield self._check_sigs_and_hash(room_version, pdu)
391
392 res_pdus = yield self.handler.on_send_join_request(origin, pdu)
374 pdu = await self._check_sigs_and_hash(room_version, pdu)
375
376 res_pdus = await self.handler.on_send_join_request(origin, pdu)
393377 time_now = self._clock.time_msec()
394378 return (
395379 200,
401385 },
402386 )
403387
404 @defer.inlineCallbacks
405 def on_make_leave_request(self, origin, room_id, user_id):
388 async def on_make_leave_request(self, origin, room_id, user_id):
406389 origin_host, _ = parse_server_name(origin)
407 yield self.check_server_matches_acl(origin_host, room_id)
408 pdu = yield self.handler.on_make_leave_request(origin, room_id, user_id)
409
410 room_version = yield self.store.get_room_version(room_id)
390 await self.check_server_matches_acl(origin_host, room_id)
391 pdu = await self.handler.on_make_leave_request(origin, room_id, user_id)
392
393 room_version = await self.store.get_room_version(room_id)
411394
412395 time_now = self._clock.time_msec()
413396 return {"event": pdu.get_pdu_json(time_now), "room_version": room_version}
414397
415 @defer.inlineCallbacks
416 def on_send_leave_request(self, origin, content, room_id):
398 async def on_send_leave_request(self, origin, content, room_id):
417399 logger.debug("on_send_leave_request: content: %s", content)
418400
419 room_version = yield self.store.get_room_version(room_id)
401 room_version = await self.store.get_room_version(room_id)
420402 format_ver = room_version_to_event_format(room_version)
421403 pdu = event_from_pdu_json(content, format_ver)
422404
423405 origin_host, _ = parse_server_name(origin)
424 yield self.check_server_matches_acl(origin_host, pdu.room_id)
406 await self.check_server_matches_acl(origin_host, pdu.room_id)
425407
426408 logger.debug("on_send_leave_request: pdu sigs: %s", pdu.signatures)
427409
428 pdu = yield self._check_sigs_and_hash(room_version, pdu)
429
430 yield self.handler.on_send_leave_request(origin, pdu)
410 pdu = await self._check_sigs_and_hash(room_version, pdu)
411
412 await self.handler.on_send_leave_request(origin, pdu)
431413 return 200, {}
432414
433 @defer.inlineCallbacks
434 def on_event_auth(self, origin, room_id, event_id):
435 with (yield self._server_linearizer.queue((origin, room_id))):
415 async def on_event_auth(self, origin, room_id, event_id):
416 with (await self._server_linearizer.queue((origin, room_id))):
436417 origin_host, _ = parse_server_name(origin)
437 yield self.check_server_matches_acl(origin_host, room_id)
418 await self.check_server_matches_acl(origin_host, room_id)
438419
439420 time_now = self._clock.time_msec()
440 auth_pdus = yield self.handler.on_event_auth(event_id)
421 auth_pdus = await self.handler.on_event_auth(event_id)
441422 res = {"auth_chain": [a.get_pdu_json(time_now) for a in auth_pdus]}
442423 return 200, res
443424
444 @defer.inlineCallbacks
445 def on_query_auth_request(self, origin, content, room_id, event_id):
425 async def on_query_auth_request(self, origin, content, room_id, event_id):
446426 """
447427 Content is a dict with keys::
448428 auth_chain (list): A list of events that give the auth chain.
461441 Returns:
462442 Deferred: Results in `dict` with the same format as `content`
463443 """
464 with (yield self._server_linearizer.queue((origin, room_id))):
444 with (await self._server_linearizer.queue((origin, room_id))):
465445 origin_host, _ = parse_server_name(origin)
466 yield self.check_server_matches_acl(origin_host, room_id)
467
468 room_version = yield self.store.get_room_version(room_id)
446 await self.check_server_matches_acl(origin_host, room_id)
447
448 room_version = await self.store.get_room_version(room_id)
469449 format_ver = room_version_to_event_format(room_version)
470450
471451 auth_chain = [
472452 event_from_pdu_json(e, format_ver) for e in content["auth_chain"]
473453 ]
474454
475 signed_auth = yield self._check_sigs_and_hash_and_fetch(
455 signed_auth = await self._check_sigs_and_hash_and_fetch(
476456 origin, auth_chain, outlier=True, room_version=room_version
477457 )
478458
479 ret = yield self.handler.on_query_auth(
459 ret = await self.handler.on_query_auth(
480460 origin,
481461 event_id,
482462 room_id,
502482 return self.on_query_request("user_devices", user_id)
503483
504484 @trace
505 @defer.inlineCallbacks
506 @log_function
507 def on_claim_client_keys(self, origin, content):
485 async def on_claim_client_keys(self, origin, content):
508486 query = []
509487 for user_id, device_keys in content.get("one_time_keys", {}).items():
510488 for device_id, algorithm in device_keys.items():
511489 query.append((user_id, device_id, algorithm))
512490
513491 log_kv({"message": "Claiming one time keys.", "user, device pairs": query})
514 results = yield self.store.claim_e2e_one_time_keys(query)
492 results = await self.store.claim_e2e_one_time_keys(query)
515493
516494 json_result = {}
517495 for user_id, device_keys in results.items():
535513
536514 return {"one_time_keys": json_result}
537515
538 @defer.inlineCallbacks
539 @log_function
540 def on_get_missing_events(
516 async def on_get_missing_events(
541517 self, origin, room_id, earliest_events, latest_events, limit
542518 ):
543 with (yield self._server_linearizer.queue((origin, room_id))):
519 with (await self._server_linearizer.queue((origin, room_id))):
544520 origin_host, _ = parse_server_name(origin)
545 yield self.check_server_matches_acl(origin_host, room_id)
521 await self.check_server_matches_acl(origin_host, room_id)
546522
547523 logger.info(
548524 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
552528 limit,
553529 )
554530
555 missing_events = yield self.handler.on_get_missing_events(
531 missing_events = await self.handler.on_get_missing_events(
556532 origin, room_id, earliest_events, latest_events, limit
557533 )
558534
585561 destination=None,
586562 )
587563
588 @defer.inlineCallbacks
589 def _handle_received_pdu(self, origin, pdu):
564 async def _handle_received_pdu(self, origin, pdu):
590565 """ Process a PDU received in a federation /send/ transaction.
591566
592567 If the event is invalid, then this method throws a FederationError.
639614 logger.info("Accepting join PDU %s from %s", pdu.event_id, origin)
640615
641616 # We've already checked that we know the room version by this point
642 room_version = yield self.store.get_room_version(pdu.room_id)
617 room_version = await self.store.get_room_version(pdu.room_id)
643618
644619 # Check signature.
645620 try:
646 pdu = yield self._check_sigs_and_hash(room_version, pdu)
621 pdu = await self._check_sigs_and_hash(room_version, pdu)
647622 except SynapseError as e:
648623 raise FederationError("ERROR", e.code, e.msg, affected=pdu.event_id)
649624
650 yield self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
625 await self.handler.on_receive_pdu(origin, pdu, sent_to_us_directly=True)
651626
652627 def __str__(self):
653628 return "<ReplicationLayer(%s)>" % self.server_name
654629
655 @defer.inlineCallbacks
656 def exchange_third_party_invite(
630 async def exchange_third_party_invite(
657631 self, sender_user_id, target_user_id, room_id, signed
658632 ):
659 ret = yield self.handler.exchange_third_party_invite(
633 ret = await self.handler.exchange_third_party_invite(
660634 sender_user_id, target_user_id, room_id, signed
661635 )
662636 return ret
663637
664 @defer.inlineCallbacks
665 def on_exchange_third_party_invite_request(self, room_id, event_dict):
666 ret = yield self.handler.on_exchange_third_party_invite_request(
638 async def on_exchange_third_party_invite_request(self, room_id, event_dict):
639 ret = await self.handler.on_exchange_third_party_invite_request(
667640 room_id, event_dict
668641 )
669642 return ret
670643
671 @defer.inlineCallbacks
672 def check_server_matches_acl(self, server_name, room_id):
644 async def check_server_matches_acl(self, server_name, room_id):
673645 """Check if the given server is allowed by the server ACLs in the room
674646
675647 Args:
679651 Raises:
680652 AuthError if the server does not match the ACL
681653 """
682 state_ids = yield self.store.get_current_state_ids(room_id)
654 state_ids = await self.store.get_current_state_ids(room_id)
683655 acl_event_id = state_ids.get((EventTypes.ServerACL, ""))
684656
685657 if not acl_event_id:
686658 return
687659
688 acl_event = yield self.store.get_event(acl_event_id)
660 acl_event = await self.store.get_event(acl_event_id)
689661 if server_matches_acl_event(server_name, acl_event):
690662 return
691663
708680 # server name is a literal IP
709681 allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
710682 if not isinstance(allow_ip_literals, bool):
711 logger.warn("Ignorning non-bool allow_ip_literals flag")
683 logger.warning("Ignoring non-bool allow_ip_literals flag")
712684 allow_ip_literals = True
713685 if not allow_ip_literals:
714686 # check for ipv6 literals. These start with '['.
722694 # next, check the deny list
723695 deny = acl_event.content.get("deny", [])
724696 if not isinstance(deny, (list, tuple)):
725 logger.warn("Ignorning non-list deny ACL %s", deny)
697 logger.warning("Ignoring non-list deny ACL %s", deny)
726698 deny = []
727699 for e in deny:
728700 if _acl_entry_matches(server_name, e):
732704 # then the allow list.
733705 allow = acl_event.content.get("allow", [])
734706 if not isinstance(allow, (list, tuple)):
735 logger.warn("Ignorning non-list allow ACL %s", allow)
707 logger.warning("Ignoring non-list allow ACL %s", allow)
736708 allow = []
737709 for e in allow:
738710 if _acl_entry_matches(server_name, e):
746718
747719 def _acl_entry_matches(server_name, acl_entry):
748720 if not isinstance(acl_entry, six.string_types):
749 logger.warn(
721 logger.warning(
750722 "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
751723 )
752724 return False
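ACL entries are server-name globs supporting '*' and '?', so after the string-type check above each entry is matched pattern-wise against the remote server's name, deny list first, then allow list. A stand-in for the matching step using fnmatchcase (the real code compiles its own regex from the glob):

    from fnmatch import fnmatchcase

    def acl_entry_matches(server_name, acl_entry):
        """Match a server name against a glob such as '*.evil.example'."""
        if not isinstance(acl_entry, str):
            return False  # mirrors the warn-and-skip behaviour above
        return fnmatchcase(server_name, acl_entry)

    assert acl_entry_matches("matrix.evil.example", "*.evil.example")
    assert acl_entry_matches("evil.example", "evil.example")
    assert not acl_entry_matches("matrix.org", "*.evil.example")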
798770
799771 self.query_handlers[query_type] = handler
800772
801 @defer.inlineCallbacks
802 def on_edu(self, edu_type, origin, content):
773 async def on_edu(self, edu_type, origin, content):
803774 handler = self.edu_handlers.get(edu_type)
804775 if not handler:
805 logger.warn("No handler registered for EDU type %s", edu_type)
776 logger.warning("No handler registered for EDU type %s", edu_type)
806777
807778 with start_active_span_from_edu(content, "handle_edu"):
808779 try:
809 yield handler(origin, content)
780 await handler(origin, content)
810781 except SynapseError as e:
811782 logger.info("Failed to handle edu %r: %r", edu_type, e)
812783 except Exception:
815786 def on_query(self, query_type, args):
816787 handler = self.query_handlers.get(query_type)
817788 if not handler:
818 logger.warn("No handler registered for query type %s", query_type)
789 logger.warning("No handler registered for query type %s", query_type)
819790 raise NotFoundError("No handler for Query type '%s'" % (query_type,))
820791
821792 return handler(args)
839810
840811 super(ReplicationFederationHandlerRegistry, self).__init__()
841812
842 def on_edu(self, edu_type, origin, content):
813 async def on_edu(self, edu_type, origin, content):
843814 """Overrides FederationHandlerRegistry
844815 """
845816 if not self.config.use_presence and edu_type == "m.presence":
847818
848819 handler = self.edu_handlers.get(edu_type)
849820 if handler:
850 return super(ReplicationFederationHandlerRegistry, self).on_edu(
821 return await super(ReplicationFederationHandlerRegistry, self).on_edu(
851822 edu_type, origin, content
852823 )
853824
854 return self._send_edu(edu_type=edu_type, origin=origin, content=content)
855
856 def on_query(self, query_type, args):
825 return await self._send_edu(edu_type=edu_type, origin=origin, content=content)
826
827 async def on_query(self, query_type, args):
857828 """Overrides FederationHandlerRegistry
858829 """
859830 handler = self.query_handlers.get(query_type)
860831 if handler:
861 return handler(args)
862
863 return self._get_query_client(query_type=query_type, args=args)
832 return await handler(args)
833
834 return await self._get_query_client(query_type=query_type, args=args)
3535
3636 from sortedcontainers import SortedDict
3737
38 from twisted.internet import defer
39
3840 from synapse.metrics import LaterGauge
3941 from synapse.storage.presence import UserPresenceState
4042 from synapse.util.metrics import Measure
211213 receipt (synapse.types.ReadReceipt):
212214 """
213215 # nothing to do here: the replication listener will handle it.
214 pass
216 return defer.succeed(None)
215217
216218 def send_presence(self, states):
217219 """As per FederationSender
191191 # We have to keep 2 free slots for presence and rr_edus
192192 limit = MAX_EDUS_PER_TRANSACTION - 2
193193
194 device_update_edus, dev_list_id = (
195 yield self._get_device_update_edus(limit)
194 device_update_edus, dev_list_id = yield self._get_device_update_edus(
195 limit
196196 )
197197
198198 limit -= len(device_update_edus)
199199
200 to_device_edus, device_stream_id = (
201 yield self._get_to_device_message_edus(limit)
202 )
200 (
201 to_device_edus,
202 device_stream_id,
203 ) = yield self._get_to_device_message_edus(limit)
203204
204205 pending_edus = device_update_edus + to_device_edus
205206
358359 last_device_list = self._last_device_list_stream_id
359360
360361 # Retrieve list of new device updates to send to the destination
361 now_stream_id, results = yield self._store.get_devices_by_remote(
362 now_stream_id, results = yield self._store.get_device_updates_by_remote(
362363 self._destination, last_device_list, limit=limit
363364 )
364365 edus = [
365366 Edu(
366367 origin=self._server_name,
367368 destination=self._destination,
368 edu_type="m.device_list_update",
369 edu_type=edu_type,
369370 content=content,
370371 )
371 for content in results
372 for (edu_type, content) in results
372373 ]
373374
374 assert len(edus) <= limit, "get_devices_by_remote returned too many EDUs"
375 assert len(edus) <= limit, "get_device_updates_by_remote returned too many EDUs"
375376
376377 return (edus, now_stream_id)
377378
145145 if code == 200:
146146 for e_id, r in response.get("pdus", {}).items():
147147 if "error" in r:
148 logger.warn(
148 logger.warning(
149149 "TX [%s] {%s} Remote returned error for %s: %s",
150150 destination,
151151 txn_id,
154154 )
155155 else:
156156 for p in pdus:
157 logger.warn(
157 logger.warning(
158158 "TX [%s] {%s} Failed to send event %s",
159159 destination,
160160 txn_id,
1313 # limitations under the License.
1414
1515 """The transport layer is responsible for both sending transactions to remote
16 home servers and receiving a variety of requests from other home servers.
16 homeservers and receiving a variety of requests from other homeservers.
1717
18 By default this is done over HTTPS (and all home servers are required to
18 By default this is done over HTTPS (and all homeservers are required to
1919 support HTTPS), however individual pairings of servers may decide to
2020 communicate over a different (albeit still reliable) protocol.
2121 """
4343 given event.
4444
4545 Args:
46 destination (str): The host name of the remote home server we want
46 destination (str): The host name of the remote homeserver we want
4747 to get the state from.
4848 context (str): The name of the context we want the state of
4949 event_id (str): The event we want the context at.
6767 given event. Returns the state's event_id's
6868
6969 Args:
70 destination (str): The host name of the remote home server we want
70 destination (str): The host name of the remote homeserver we want
7171 to get the state from.
7272 context (str): The name of the context we want the state of
7373 event_id (str): The event we want the context at.
9090 """ Requests the pdu with give id and origin from the given server.
9191
9292 Args:
93 destination (str): The host name of the remote home server we want
93 destination (str): The host name of the remote homeserver we want
9494 to get the state from.
9595 event_id (str): The id of the event being requested.
9696 timeout (int): How long to try (in ms) the destination for before
121121 Deferred: Results in a dict received from the remote homeserver.
122122 """
123123 logger.debug(
124 "backfill dest=%s, room_id=%s, event_tuples=%s, limit=%s",
124 "backfill dest=%s, room_id=%s, event_tuples=%r, limit=%s",
125125 destination,
126126 room_id,
127 repr(event_tuples),
127 event_tuples,
128128 str(limit),
129129 )
130130
201201 sig = strip_quotes(param_dict["sig"])
202202 return origin, key, sig
203203 except Exception as e:
204 logger.warn(
204 logger.warning(
205205 "Error parsing auth header '%s': %s",
206206 header_bytes.decode("ascii", "replace"),
207207 e,
286286 except NoAuthenticationError:
287287 origin = None
288288 if self.REQUIRE_AUTH:
289 logger.warn("authenticate_request failed: missing authentication")
289 logger.warning(
290 "authenticate_request failed: missing authentication"
291 )
290292 raise
291293 except Exception as e:
292 logger.warn("authenticate_request failed: %s", e)
294 logger.warning("authenticate_request failed: %s", e)
293295 raise
294296
295297 request_tags = {
711713
712714 This API returns information in the same format as /publicRooms on the
713715 client API, but will only ever include local public rooms and hence is
714 intended for consumption by other home servers.
716 intended for consumption by other homeservers.
715717
716718 GET /publicRooms HTTP/1.1
717719
180180 elif not self.is_mine_id(user_id):
181181 destination = get_domain_from_id(user_id)
182182 else:
183 logger.warn(
183 logger.warning(
184184 "Incorrectly trying to do attestations for user: %r in %r",
185185 user_id,
186186 group_id,
487487 profile = yield self.profile_handler.get_profile_from_cache(user_id)
488488 user_profile.update(profile)
489489 except Exception as e:
490 logger.warn("Error getting profile for %s: %s", user_id, e)
490 logger.warning("Error getting profile for %s: %s", user_id, e)
491491 user_profiles.append(user_profile)
492492
493493 return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
3737 {"type": "m.tag", "content": {"tags": room_tags}, "room_id": room_id}
3838 )
3939
40 account_data, room_account_data = (
41 yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
42 )
40 (
41 account_data,
42 room_account_data,
43 ) = yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
4344
4445 for account_data_type, content in account_data.items():
4546 results.append({"type": account_data_type, "content": content})
2929 def __init__(self, hs):
3030 super(AdminHandler, self).__init__(hs)
3131
32 self.storage = hs.get_storage()
33 self.state_store = self.storage.state
34
3235 @defer.inlineCallbacks
3336 def get_whois(self, user):
3437 connections = []
204207
205208 from_key = events[-1].internal_metadata.after
206209
207 events = yield filter_events_for_client(self.store, user_id, events)
210 events = yield filter_events_for_client(self.storage, user_id, events)
208211
209212 writer.write_events(room_id, events)
210213
240243 for event_id in extremities:
241244 if not event_to_unseen_prevs[event_id]:
242245 continue
243 state = yield self.store.get_state_for_event(event_id)
246 state = yield self.state_store.get_state_for_event(event_id)
244247 writer.write_state(room_id, event_id, state)
245248
246249 return writer.finished()
7272 try:
7373 limit = 100
7474 while True:
75 upper_bound, events = yield self.store.get_new_events_for_appservice(
75 (
76 upper_bound,
77 events,
78 ) = yield self.store.get_new_events_for_appservice(
7679 self.current_max, limit
7780 )
7881
101101 login_types.append(t)
102102 self._supported_login_types = login_types
103103
104 self._account_ratelimiter = Ratelimiter()
105 self._failed_attempts_ratelimiter = Ratelimiter()
104 # Ratelimiter for failed auth during UIA. Uses same ratelimit config
105 # as per `rc_login.failed_attempts`.
106 self._failed_uia_attempts_ratelimiter = Ratelimiter()
106107
107108 self._clock = self.hs.get_clock()
108109
132133
133134 AuthError if the client has completed a login flow, and it gives
134135 a different user to `requester`
135 """
136
137 LimitExceededError if the ratelimiter's failed request count for this
138 user is too high to proceed
139
140 """
141
142 user_id = requester.user.to_string()
143
144 # Check if we should be ratelimited due to too many previous failed attempts
145 self._failed_uia_attempts_ratelimiter.ratelimit(
146 user_id,
147 time_now_s=self._clock.time(),
148 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
149 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
150 update=False,
151 )
136152
137153 # build a list of supported flows
138154 flows = [[login_type] for login_type in self._supported_login_types]
139155
140 result, params, _ = yield self.check_auth(flows, request_body, clientip)
156 try:
157 result, params, _ = yield self.check_auth(flows, request_body, clientip)
158 except LoginError:
159 # Update the ratelimiter to say we failed (`can_do_action` doesn't raise).
160 self._failed_uia_attempts_ratelimiter.can_do_action(
161 user_id,
162 time_now_s=self._clock.time(),
163 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
164 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
165 update=True,
166 )
167 raise
141168
142169 # find the completed login type
143170 for login_type in self._supported_login_types:
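The two-phase use of the limiter above is the point: it is consulted with update=False before the flow starts (merely attempting UIA costs nothing) and charged via can_do_action(..., update=True) only when authentication actually fails. A toy failure-counting limiter showing the same shape (class and parameters invented for illustration):

    import time

    class FailureRateLimiter(object):
        """Toy limiter: allow at most `burst` failures per `window` seconds."""
        def __init__(self, burst=3, window=60.0):
            self.burst = burst
            self.window = window
            self._failures = {}  # key -> list of failure timestamps

        def _recent(self, key, now):
            cutoff = now - self.window
            self._failures[key] = [
                t for t in self._failures.get(key, []) if t > cutoff
            ]
            return self._failures[key]

        def check(self, key):
            # update=False analogue: raise if over budget, record nothing.
            if len(self._recent(key, time.time())) >= self.burst:
                raise RuntimeError("Too many failed attempts")

        def record_failure(self, key):
            # update=True analogue: charge the budget after a failed attempt.
            now = time.time()
            self._recent(key, now).append(now)

    limiter = FailureRateLimiter()

    def validate_with_ui_auth(user_id, run_auth_checkers):
        limiter.check(user_id)  # refuse early if already over budget
        try:
            return run_auth_checkers()
        except ValueError:  # stand-in for LoginError
            limiter.record_failure(user_id)  # only failures consume budget
            raise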
222249 # could continue registration from your phone having clicked the
223250 # email auth link on there). It's probably too open to abuse
224251 # because it lets unauthenticated clients store arbitrary objects
225 # on a home server.
252 # on a homeserver.
226253 # Revisit: Assuming the REST APIs do sensible validation, the data
227254 # isn't arbitrary.
228255 session["clientdict"] = clientdict
500527 multiple matches
501528
502529 Raises:
503 LimitExceededError if the ratelimiter's login requests count for this
504 user is too high to proceed.
505530 UserDeactivatedError if a user is found but is deactivated.
506531 """
507 self.ratelimit_login_per_account(user_id)
508532 res = yield self._find_user_id_and_pwd_hash(user_id)
509533 if res is not None:
510534 return res[0]
524548
525549 result = None
526550 if not user_infos:
527 logger.warn("Attempted to login as %s but they do not exist", user_id)
551 logger.warning("Attempted to login as %s but they do not exist", user_id)
528552 elif len(user_infos) == 1:
529553 # a single match (possibly not exact)
530554 result = user_infos.popitem()
533557 result = (user_id, user_infos[user_id])
534558 else:
535559 # multiple matches, none of them exact
536 logger.warn(
560 logger.warning(
537561 "Attempted to login as %s but it matches more than one user "
538562 "inexactly: %r",
539563 user_id,
571595 StoreError if there was a problem accessing the database
572596 SynapseError if there was a problem with the request
573597 LoginError if there was an authentication problem.
574 LimitExceededError if the ratelimiter's login requests count for this
575 user is too high to proceed.
576598 """
577599
578600 if username.startswith("@"):
579601 qualified_user_id = username
580602 else:
581603 qualified_user_id = UserID(username, self.hs.hostname).to_string()
582
583 self.ratelimit_login_per_account(qualified_user_id)
584604
585605 login_type = login_submission.get("type")
586606 known_login_type = False
648668
649669 if not known_login_type:
650670 raise SynapseError(400, "Unknown login type %s" % login_type)
651
652 # unknown username or invalid password.
653 self._failed_attempts_ratelimiter.ratelimit(
654 qualified_user_id.lower(),
655 time_now_s=self._clock.time(),
656 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
657 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
658 update=True,
659 )
660671
661672 # We raise a 403 here, but note that if we're doing user-interactive
662673 # login, it turns all LoginErrors into a 401 anyway.
709720 Returns:
710721 Deferred[unicode] the canonical_user_id, or Deferred[None] if
711722 unknown user/bad password
712
713 Raises:
714 LimitExceededError if the ratelimiter's login requests count for this
715 user is too high to proceed.
716723 """
717724 lookupres = yield self._find_user_id_and_pwd_hash(user_id)
718725 if not lookupres:
727734
728735 result = yield self.validate_hash(password, password_hash)
729736 if not result:
730 logger.warn("Failed password login for user %s", user_id)
737 logger.warning("Failed password login for user %s", user_id)
731738 return None
732739 return user_id
733740
741748 auth_api.validate_macaroon(macaroon, "login", user_id)
742749 except Exception:
743750 raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN)
744 self.ratelimit_login_per_account(user_id)
751
745752 yield self.auth.check_auth_blocking(user_id)
746753 return user_id
747754
809816 @defer.inlineCallbacks
810817 def add_threepid(self, user_id, medium, address, validated_at):
811818 # 'Canonicalise' email addresses down to lower case.
812 # We've now moving towards the Home Server being the entity that
819 # We're now moving towards the homeserver being the entity
813820 # is responsible for validating threepids used for resetting passwords
814821 # on accounts, so in future Synapse will gain knowledge of specific
815822 # types (mediums) of threepid. For now, we still use the existing
910917 return defer_to_thread(self.hs.get_reactor(), _do_validate_hash)
911918 else:
912919 return defer.succeed(False)
913
914 def ratelimit_login_per_account(self, user_id):
915 """Checks whether the process must be stopped because of ratelimiting.
916
917 Checks against two ratelimiters: the generic one for login attempts per
918 account and the one specific to failed attempts.
919
920 Args:
921 user_id (unicode): complete @user:id
922
923 Raises:
924 LimitExceededError if one of the ratelimiters' login requests count
925 for this user is too high to proceed.
926 """
927 self._failed_attempts_ratelimiter.ratelimit(
928 user_id.lower(),
929 time_now_s=self._clock.time(),
930 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
931 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
932 update=False,
933 )
934
935 self._account_ratelimiter.ratelimit(
936 user_id.lower(),
937 time_now_s=self._clock.time(),
938 rate_hz=self.hs.config.rc_login_account.per_second,
939 burst_count=self.hs.config.rc_login_account.burst_count,
940 update=True,
941 )
942920
943921
944922 @attr.s
4545
4646 self.hs = hs
4747 self.state = hs.get_state_handler()
48 self.state_store = hs.get_storage().state
4849 self._auth_handler = hs.get_auth_handler()
4950
5051 @trace
177178 continue
178179
179180 # mapping from event_id -> state_dict
180 prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
181 prev_state_ids = yield self.state_store.get_state_ids_for_events(event_ids)
181182
182183 # Check if we've joined the room? If so we just blindly add all the users to
183184 # the "possibly changed" users.
457458 @defer.inlineCallbacks
458459 def on_federation_query_user_devices(self, user_id):
459460 stream_id, devices = yield self.store.get_devices_with_keys_by_user(user_id)
460 return {"user_id": user_id, "stream_id": stream_id, "devices": devices}
461 master_key = yield self.store.get_e2e_cross_signing_key(user_id, "master")
462 self_signing_key = yield self.store.get_e2e_cross_signing_key(
463 user_id, "self_signing"
464 )
465
466 return {
467 "user_id": user_id,
468 "stream_id": stream_id,
469 "devices": devices,
470 "master_key": master_key,
471 "self_signing_key": self_signing_key,
472 }
461473
462474 @defer.inlineCallbacks
463475 def user_left_room(self, user, room_id):
655667 except (NotRetryingDestination, RequestSendFailed, HttpResponseException):
656668 # TODO: Remember that we are now out of sync and try again
657669 # later
658 logger.warn("Failed to handle device list update for %s", user_id)
670 logger.warning("Failed to handle device list update for %s", user_id)
659671 # We abort on exceptions rather than accepting the update
660672 # as otherwise synapse will 'forget' that its device list
661673 # is out of date. If we bail then we will retry the resync
693705 # up on storing the total list of devices and only handle the
694706 # delta instead.
695707 if len(devices) > 1000:
696 logger.warn(
708 logger.warning(
697709 "Ignoring device list snapshot for %s as it has >1K devs (%d)",
698710 user_id,
699711 len(devices),
5151 local_messages = {}
5252 sender_user_id = content["sender"]
5353 if origin != get_domain_from_id(sender_user_id):
54 logger.warn(
54 logger.warning(
5555 "Dropping device message from %r with spoofed sender %r",
5656 origin,
5757 sender_user_id,
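Both here and in the signing-key EDU handler further down, the defence is the same: a server may only send EDUs on behalf of users whose domain matches the sending origin. A hedged sketch of the check, with get_domain_from_id simplified to a split and no validation:

    def get_domain_from_id(user_id):
        """'@alice:example.org' -> 'example.org' (simplified)."""
        return user_id.split(":", 1)[1]

    def is_spoofed(origin, sender_user_id):
        # An EDU may only speak for the origin server's own users.
        return origin != get_domain_from_id(sender_user_id)

    assert not is_spoofed("example.org", "@alice:example.org")
    assert is_spoofed("evil.example", "@alice:example.org")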
249249 ignore_backoff=True,
250250 )
251251 except CodeMessageException as e:
252 logging.warn("Error retrieving alias")
252 logging.warning("Error retrieving alias")
253253 if e.code == 404:
254254 result = None
255255 else:
282282 def on_directory_query(self, args):
283283 room_alias = RoomAlias.from_string(args["room_alias"])
284284 if not self.hs.is_mine(room_alias):
285 raise SynapseError(400, "Room Alias is not hosted on this Home Server")
285 raise SynapseError(400, "Room Alias is not hosted on this homeserver")
286286
287287 result = yield self.get_association_from_room_alias(room_alias)
288288
3535 get_verify_key_from_cross_signing_key,
3636 )
3737 from synapse.util import unwrapFirstError
38 from synapse.util.async_helpers import Linearizer
39 from synapse.util.caches.expiringcache import ExpiringCache
3840 from synapse.util.retryutils import NotRetryingDestination
3941
4042 logger = logging.getLogger(__name__)
4850 self.is_mine = hs.is_mine
4951 self.clock = hs.get_clock()
5052
53 self._edu_updater = SigningKeyEduUpdater(hs, self)
54
55 federation_registry = hs.get_federation_registry()
56
57 # FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec
58 federation_registry.register_edu_handler(
59 "org.matrix.signing_key_update",
60 self._edu_updater.incoming_signing_key_update,
61 )
5162 # doesn't really work as part of the generic query API, because the
5263 # query request requires an object POST, but we abuse the
5364 # "query handler" interface.
54 hs.get_federation_registry().register_query_handler(
65 federation_registry.register_query_handler(
5566 "client_keys", self.on_federation_query_client_keys
5667 )
5768
118129 else:
119130 query_list.append((user_id, None))
120131
121 user_ids_not_in_cache, remote_results = (
122 yield self.store.get_user_devices_from_cache(query_list)
123 )
132 (
133 user_ids_not_in_cache,
134 remote_results,
135 ) = yield self.store.get_user_devices_from_cache(query_list)
124136 for user_id, devices in iteritems(remote_results):
125137 user_devices = results.setdefault(user_id, {})
126138 for device_id, device in iteritems(devices):
206218 if user_id in destination_query:
207219 results[user_id] = keys
208220
209 for user_id, key in remote_result["master_keys"].items():
210 if user_id in destination_query:
211 cross_signing_keys["master_keys"][user_id] = key
212
213 for user_id, key in remote_result["self_signing_keys"].items():
214 if user_id in destination_query:
215 cross_signing_keys["self_signing_keys"][user_id] = key
221 if "master_keys" in remote_result:
222 for user_id, key in remote_result["master_keys"].items():
223 if user_id in destination_query:
224 cross_signing_keys["master_keys"][user_id] = key
225
226 if "self_signing_keys" in remote_result:
227 for user_id, key in remote_result["self_signing_keys"].items():
228 if user_id in destination_query:
229 cross_signing_keys["self_signing_keys"][user_id] = key
216230
217231 except Exception as e:
218232 failure = _exception_to_failure(e)
250264
251265 Returns:
252266 defer.Deferred[dict[str, dict[str, dict]]]: map from
253 (master|self_signing|user_signing) -> user_id -> key
267 (master_keys|self_signing_keys|user_signing_keys) -> user_id -> key
254268 """
255269 master_keys = {}
256270 self_signing_keys = {}
342356 """
343357 device_keys_query = query_body.get("device_keys", {})
344358 res = yield self.query_local_devices(device_keys_query)
345 return {"device_keys": res}
359 ret = {"device_keys": res}
360
361 # add in the cross-signing keys
362 cross_signing_keys = yield self.get_cross_signing_keys_from_cache(
363 device_keys_query, None
364 )
365
366 ret.update(cross_signing_keys)
367
368 return ret
346369
347370 @trace
348371 @defer.inlineCallbacks
687710
688711 try:
689712 # get our self-signing key to verify the signatures
690 _, self_signing_key_id, self_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
691 user_id, "self_signing"
692 )
713 (
714 _,
715 self_signing_key_id,
716 self_signing_verify_key,
717 ) = yield self._get_e2e_cross_signing_verify_key(user_id, "self_signing")
693718
694719 # get our master key, since we may have received a signature of it.
695720 # We need to fetch it here so that we know what its key ID is, so
696721 # that we can check if a signature that was sent is a signature of
697722 # the master key or of a device
698 master_key, _, master_verify_key = yield self._get_e2e_cross_signing_verify_key(
699 user_id, "master"
700 )
723 (
724 master_key,
725 _,
726 master_verify_key,
727 ) = yield self._get_e2e_cross_signing_verify_key(user_id, "master")
701728
702729 # fetch our stored devices. This is used to 1. verify
703730 # signatures on the master key, and 2. to compare with what
837864
838865 try:
839866 # get our user-signing key to verify the signatures
840 user_signing_key, user_signing_key_id, user_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
841 user_id, "user_signing"
842 )
867 (
868 user_signing_key,
869 user_signing_key_id,
870 user_signing_verify_key,
871 ) = yield self._get_e2e_cross_signing_verify_key(user_id, "user_signing")
843872 except SynapseError as e:
844873 failure = _exception_to_failure(e)
845874 for user, devicemap in signatures.items():
858887 try:
859888 # get the target user's master key, to make sure it matches
860889 # what was sent
861 master_key, master_key_id, _ = yield self._get_e2e_cross_signing_verify_key(
890 (
891 master_key,
892 master_key_id,
893 _,
894 ) = yield self._get_e2e_cross_signing_verify_key(
862895 target_user, "master", user_id
863896 )
864897
10461079 target_user_id = attr.ib()
10471080 target_device_id = attr.ib()
10481081 signature = attr.ib()
1082
1083
1084 class SigningKeyEduUpdater(object):
1085 """Handles incoming signing key updates from federation and updates the DB"""
1086
1087 def __init__(self, hs, e2e_keys_handler):
1088 self.store = hs.get_datastore()
1089 self.federation = hs.get_federation_client()
1090 self.clock = hs.get_clock()
1091 self.e2e_keys_handler = e2e_keys_handler
1092
1093 self._remote_edu_linearizer = Linearizer(name="remote_signing_key")
1094
1095 # user_id -> list of updates waiting to be handled.
1096 self._pending_updates = {}
1097
1098 # Recently seen stream ids. We don't bother keeping these in the DB,
1099 # but they're useful to have around to reduce the number of spurious
1100 # resyncs.
1101 self._seen_updates = ExpiringCache(
1102 cache_name="signing_key_update_edu",
1103 clock=self.clock,
1104 max_len=10000,
1105 expiry_ms=30 * 60 * 1000,
1106 iterable=True,
1107 )
1108
1109 @defer.inlineCallbacks
1110 def incoming_signing_key_update(self, origin, edu_content):
1111 """Called on incoming signing key update from federation. Responsible for
1112 parsing the EDU and adding it to the pending updates list.
1113
1114 Args:
1115 origin (string): the server that sent the EDU
1116 edu_content (dict): the contents of the EDU
1117 """
1118
1119 user_id = edu_content.pop("user_id")
1120 master_key = edu_content.pop("master_key", None)
1121 self_signing_key = edu_content.pop("self_signing_key", None)
1122
1123 if get_domain_from_id(user_id) != origin:
1124 logger.warning("Got signing key update edu for %r from %r", user_id, origin)
1125 return
1126
1127 room_ids = yield self.store.get_rooms_for_user(user_id)
1128 if not room_ids:
1129 # We don't share any rooms with this user. Ignore update, as we
1130 # probably won't get any further updates.
1131 return
1132
1133 self._pending_updates.setdefault(user_id, []).append(
1134 (master_key, self_signing_key)
1135 )
1136
1137 yield self._handle_signing_key_updates(user_id)
1138
1139 @defer.inlineCallbacks
1140 def _handle_signing_key_updates(self, user_id):
1141 """Actually handle pending updates.
1142
1143 Args:
1144 user_id (string): the user whose updates we are processing
1145 """
1146
1147 device_handler = self.e2e_keys_handler.device_handler
1148
1149 with (yield self._remote_edu_linearizer.queue(user_id)):
1150 pending_updates = self._pending_updates.pop(user_id, [])
1151 if not pending_updates:
1152 # This can happen since we batch updates
1153 return
1154
1155 device_ids = []
1156
1157 logger.info("pending updates: %r", pending_updates)
1158
1159 for master_key, self_signing_key in pending_updates:
1160 if master_key:
1161 yield self.store.set_e2e_cross_signing_key(
1162 user_id, "master", master_key
1163 )
1164 _, verify_key = get_verify_key_from_cross_signing_key(master_key)
1165 # verify_key is a VerifyKey from signedjson, which uses
1166 # .version to denote the portion of the key ID after the
1167 # algorithm and colon, which is the device ID
1168 device_ids.append(verify_key.version)
1169 if self_signing_key:
1170 yield self.store.set_e2e_cross_signing_key(
1171 user_id, "self_signing", self_signing_key
1172 )
1173 _, verify_key = get_verify_key_from_cross_signing_key(
1174 self_signing_key
1175 )
1176 device_ids.append(verify_key.version)
1177
1178 yield device_handler.notify_device_update(user_id, device_ids)
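The updater's shape (append to _pending_updates, then drain the whole batch for that user under a per-user Linearizer) is a common batching idiom: concurrent EDUs for one user collapse into a single resync, and late arrivals find an empty queue. A self-contained sketch of the same idiom, using asyncio.Lock in place of Synapse's Linearizer:

import asyncio
from collections import defaultdict

class BatchingUpdater:
    def __init__(self):
        self._pending = defaultdict(list)
        self._locks = defaultdict(asyncio.Lock)

    async def incoming(self, user_id, update):
        self._pending[user_id].append(update)
        await asyncio.sleep(0)  # yield, letting other updates for this user queue up
        await self._handle(user_id)

    async def _handle(self, user_id):
        async with self._locks[user_id]:
            updates = self._pending.pop(user_id, [])
            if not updates:
                return  # another task already drained the batch
            print(user_id, "->", updates)

async def main():
    u = BatchingUpdater()
    # simulate two EDUs arriving concurrently for the same user
    await asyncio.gather(u.incoming("@a:hs", 1), u.incoming("@a:hs", 2))

asyncio.run(main())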
146146
147147
148148 class EventHandler(BaseHandler):
149 def __init__(self, hs):
150 super(EventHandler, self).__init__(hs)
151 self.storage = hs.get_storage()
152
149153 @defer.inlineCallbacks
150154 def get_event(self, user, room_id, event_id):
151155 """Retrieve a single specified event.
171175 is_peeking = user.to_string() not in users
172176
173177 filtered = yield filter_events_for_client(
174 self.store, user.to_string(), [event], is_peeking=is_peeking
178 self.storage, user.to_string(), [event], is_peeking=is_peeking
175179 )
176180
177181 if not filtered:
4444 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions
4545 from synapse.crypto.event_signing import compute_event_signature
4646 from synapse.event_auth import auth_types_for_event
47 from synapse.events.snapshot import EventContext
4748 from synapse.events.validator import EventValidator
4849 from synapse.logging.context import (
4950 make_deferred_yieldable,
9596 """Handles events that originated from federation.
9697 Responsible for:
9798 a) handling received Pdus before handing them on as Events to the rest
98 of the home server (including auth and state conflict resoultion)
99 of the homeserver (including auth and state conflict resolution)
99100 b) converting events that were produced by local clients that may need
100 to be sent to remote home servers.
101 to be sent to remote homeservers.
101102 c) doing the necessary dances to invite remote users and join remote
102103 rooms.
103104 """
108109 self.hs = hs
109110
110111 self.store = hs.get_datastore()
112 self.storage = hs.get_storage()
113 self.state_store = self.storage.state
111114 self.federation_client = hs.get_federation_client()
112115 self.state_handler = hs.get_state_handler()
113116 self.server_name = hs.hostname
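The recurring self.storage = hs.get_storage() / self.state_store = self.storage.state changes across these handlers reflect a layered storage API: state lookups, event persistence and purging now live on a facade over the flat datastore. A hedged sketch of the shape implied by the call sites in this diff (stub classes, not Synapse's real ones):

class StateStorage:
    """State-group queries (get_state_groups, get_state_ids_for_event, ...)."""

class EventsPersistenceStorage:
    """persist_event / persist_events."""

class PurgeEventsStorage:
    """purge_history / purge_room."""

class Storage:
    # facade mirroring the attributes used in this diff:
    # storage.state, storage.persistence, storage.purge_events
    def __init__(self, datastore):
        self.state = StateStorage()
        self.persistence = EventsPersistenceStorage()
        self.purge_events = PurgeEventsStorage()

storage = Storage(datastore=None)
print(storage.state, storage.persistence, storage.purge_events)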
179182 try:
180183 self._sanity_check_event(pdu)
181184 except SynapseError as err:
182 logger.warn(
185 logger.warning(
183186 "[%s %s] Received event failed sanity checks", room_id, event_id
184187 )
185188 raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
300303 # following.
301304
302305 if sent_to_us_directly:
303 logger.warn(
306 logger.warning(
304307 "[%s %s] Rejecting: failed to fetch %d prev events: %s",
305308 room_id,
306309 event_id,
323326 event_map = {event_id: pdu}
324327 try:
325328 # Get the state of the events we know about
326 ours = yield self.store.get_state_groups_ids(room_id, seen)
329 ours = yield self.state_store.get_state_groups_ids(room_id, seen)
327330
328331 # state_maps is a list of mappings from (type, state_key) to event_id
329332 state_maps = list(
349352 # note that if any of the missing prevs share missing state or
350353 # auth events, the requests to fetch those events are deduped
351354 # by the get_pdu_cache in federation_client.
352 remote_state, got_auth_chain = (
353 yield self.federation_client.get_state_for_room(
354 origin, room_id, p
355 )
355 (
356 remote_state,
357 got_auth_chain,
358 ) = yield self.federation_client.get_state_for_room(
359 origin, room_id, p
356360 )
357361
358362 # we want the state *after* p; get_state_for_room returns the
404408 state = [event_map[e] for e in six.itervalues(state_map)]
405409 auth_chain = list(auth_chains)
406410 except Exception:
407 logger.warn(
411 logger.warning(
408412 "[%s %s] Error attempting to resolve state at missing "
409413 "prev_events",
410414 room_id,
517521 # We failed to get the missing events, but since we need to handle
518522 # the case of `get_missing_events` not returning the necessary
519523 # events anyway, it is safe to simply log the error and continue.
520 logger.warn("[%s %s]: Failed to get prev_events: %s", room_id, event_id, e)
524 logger.warning(
525 "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
526 )
521527 return
522528
523529 logger.info(
544550 yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
545551 except FederationError as e:
546552 if e.code == 403:
547 logger.warn(
553 logger.warning(
548554 "[%s %s] Received prev_event %s failed history check.",
549555 room_id,
550556 event_id,
887893 # We set `check_history_visibility_only` as we might otherwise get false
888894 # positives from users having been erased.
889895 filtered_extremities = yield filter_events_for_server(
890 self.store,
896 self.storage,
891897 self.server_name,
892898 list(extremities_events.values()),
893899 redact=False,
10581064 SynapseError if the event does not pass muster
10591065 """
10601066 if len(ev.prev_event_ids()) > 20:
1061 logger.warn(
1067 logger.warning(
10621068 "Rejecting event %s which has %i prev_events",
10631069 ev.event_id,
10641070 len(ev.prev_event_ids()),
10661072 raise SynapseError(http_client.BAD_REQUEST, "Too many prev_events")
10671073
10681074 if len(ev.auth_event_ids()) > 10:
1069 logger.warn(
1075 logger.warning(
10701076 "Rejecting event %s which has %i auth_events",
10711077 ev.event_id,
10721078 len(ev.auth_event_ids()),
11001106 @defer.inlineCallbacks
11011107 def do_invite_join(self, target_hosts, room_id, joinee, content):
11021108 """ Attempts to join the `joinee` to the room `room_id` via the
1103 server `target_host`.
1109 servers contained in `target_hosts`.
11041110
11051111 This first triggers a /make_join/ request that returns a partial
11061112 event that we can fill out and sign. This is then sent to the
11091115
11101116 We suspend processing of any received events from this room until we
11111117 have finished processing the join.
1118
1119 Args:
1120 target_hosts (Iterable[str]): List of servers to attempt to join the room with.
1121
1122 room_id (str): The ID of the room to join.
1123
1124 joinee (str): The User ID of the joining user.
1125
1126 content (dict): The event content to use for the join event.
11121127 """
11131128 logger.debug("Joining %s to %s", joinee, room_id)
11141129
11671182 pass
11681183
11691184 yield self._persist_auth_tree(origin, auth_chain, state, event)
1185
1186 # Check whether this room is the result of an upgrade of a room we already know
1187 # about. If so, migrate over user information
1188 predecessor = yield self.store.get_room_predecessor(room_id)
1189 if not predecessor:
1190 return
1191 old_room_id = predecessor["room_id"]
1192 logger.debug(
1193 "Found predecessor for %s during remote join: %s", room_id, old_room_id
1194 )
1195
1196 # We retrieve the room member handler here so as not to cause a cyclic dependency
1197 member_handler = self.hs.get_room_member_handler()
1198 yield member_handler.transfer_room_state_on_room_upgrade(
1199 old_room_id, room_id
1200 )
11701201
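The new block only fires when the joined room declares a predecessor, which ultimately comes from the predecessor field of the room's m.room.create event content (per the room-upgrade spec). Schematically, with illustrative IDs:

create_content = {
    "room_version": "5",
    "predecessor": {
        "room_id": "!old:example.org",
        "event_id": "$last_event_in_old_room",
    },
}

predecessor = create_content.get("predecessor")
if predecessor:
    old_room_id = predecessor["room_id"]
    print("transferring user state from", old_room_id)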
11711202 logger.debug("Finished joining %s to %s", joinee, room_id)
11721203 finally:
12021233 with nested_logging_context(p.event_id):
12031234 yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
12041235 except Exception as e:
1205 logger.warn(
1236 logger.warning(
12061237 "Error handling queued PDU %s from %s: %s", p.event_id, origin, e
12071238 )
12081239
12491280 builder=builder
12501281 )
12511282 except AuthError as e:
1252 logger.warn("Failed to create join %r because %s", event, e)
1283 logger.warning("Failed to create join to %s because %s", room_id, e)
12531284 raise e
12541285
12551286 event_allowed = yield self.third_party_event_rules.check_event_allowed(
14931524 room_version, event, context, do_sig_check=False
14941525 )
14951526 except AuthError as e:
1496 logger.warn("Failed to create new leave %r because %s", event, e)
1527 logger.warning("Failed to create new leave %r because %s", event, e)
14971528 raise e
14981529
14991530 return event
15481579 event_id, allow_none=False, check_room_id=room_id
15491580 )
15501581
1551 state_groups = yield self.store.get_state_groups(room_id, [event_id])
1582 state_groups = yield self.state_store.get_state_groups(room_id, [event_id])
15521583
15531584 if state_groups:
15541585 _, state = list(iteritems(state_groups)).pop()
15771608 event_id, allow_none=False, check_room_id=room_id
15781609 )
15791610
1580 state_groups = yield self.store.get_state_groups_ids(room_id, [event_id])
1611 state_groups = yield self.state_store.get_state_groups_ids(room_id, [event_id])
15811612
15821613 if state_groups:
15831614 _, state = list(state_groups.items()).pop()
16051636
16061637 events = yield self.store.get_backfill_events(room_id, pdu_list, limit)
16071638
1608 events = yield filter_events_for_server(self.store, origin, events)
1639 events = yield filter_events_for_server(self.storage, origin, events)
16091640
16101641 return events
16111642
16351666 if not in_room:
16361667 raise AuthError(403, "Host not in room.")
16371668
1638 events = yield filter_events_for_server(self.store, origin, [event])
1669 events = yield filter_events_for_server(self.storage, origin, [event])
16391670 event = events[0]
16401671 return event
16411672 else:
16561687 # hack around with a try/finally instead.
16571688 success = False
16581689 try:
1659 if not event.internal_metadata.is_outlier() and not backfilled:
1690 if (
1691 not event.internal_metadata.is_outlier()
1692 and not backfilled
1693 and not context.rejected
1694 ):
16601695 yield self.action_generator.handle_push_actions_for_event(
16611696 event, context
16621697 )
17871822 # cause SynapseErrors in auth.check. We don't want to give up
17881823 # the attempt to federate altogether in such cases.
17891824
1790 logger.warn("Rejecting %s because %s", e.event_id, err.msg)
1825 logger.warning("Rejecting %s because %s", e.event_id, err.msg)
17911826
17921827 if e == event:
17931828 raise
18401875 if c and c.type == EventTypes.Create:
18411876 auth_events[(c.type, c.state_key)] = c
18421877
1843 try:
1844 yield self.do_auth(origin, event, context, auth_events=auth_events)
1845 except AuthError as e:
1846 logger.warn("[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg)
1847
1848 context.rejected = RejectedReason.AUTH_ERROR
1878 context = yield self.do_auth(origin, event, context, auth_events=auth_events)
18491879
18501880 if not context.rejected:
18511881 yield self._check_for_soft_fail(event, state, backfilled)
19011931 # given state at the event. This should correctly handle cases
19021932 # like bans, especially with state res v2.
19031933
1904 state_sets = yield self.store.get_state_groups(
1934 state_sets = yield self.state_store.get_state_groups(
19051935 event.room_id, extrem_ids
19061936 )
19071937 state_sets = list(state_sets.values())
19371967 try:
19381968 event_auth.check(room_version, event, auth_events=current_auth_events)
19391969 except AuthError as e:
1940 logger.warn("Soft-failing %r because %s", event, e)
1970 logger.warning("Soft-failing %r because %s", event, e)
19411971 event.internal_metadata.soft_failed = True
19421972
19431973 @defer.inlineCallbacks
19922022 )
19932023
19942024 missing_events = yield filter_events_for_server(
1995 self.store, origin, missing_events
2025 self.storage, origin, missing_events
19962026 )
19972027
19982028 return missing_events
20142044
20152045 Also NB that this function adds entries to it.
20162046 Returns:
2017 defer.Deferred[None]
2047 defer.Deferred[EventContext]: updated context object
20182048 """
20192049 room_version = yield self.store.get_room_version(event.room_id)
20202050
20212051 try:
2022 yield self._update_auth_events_and_context_for_auth(
2052 context = yield self._update_auth_events_and_context_for_auth(
20232053 origin, event, context, auth_events
20242054 )
20252055 except Exception:
20362066 try:
20372067 event_auth.check(room_version, event, auth_events=auth_events)
20382068 except AuthError as e:
2039 logger.warn("Failed auth resolution for %r because %s", event, e)
2040 raise e
2069 logger.warning("Failed auth resolution for %r because %s", event, e)
2070 context.rejected = RejectedReason.AUTH_ERROR
2071
2072 return context
20412073
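Note the control-flow change: do_auth no longer raises at this call site; auth failure is recorded on the returned context, and callers branch on context.rejected (as the soft-fail and push-action guards earlier in this diff now do). A toy sketch of that shape, with a stand-in context class:

class EventContext:
    rejected = False

def do_auth(event_is_allowed, context):
    if not event_is_allowed:
        context.rejected = "AUTH_ERROR"  # stand-in for RejectedReason.AUTH_ERROR
    return context

ctx = do_auth(event_is_allowed=False, context=EventContext())
if not ctx.rejected:
    print("run soft-fail checks, push actions, etc.")
else:
    print("persist the event as rejected; skip push actions")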
20422074 @defer.inlineCallbacks
20432075 def _update_auth_events_and_context_for_auth(
20612093 auth_events (dict[(str, str)->synapse.events.EventBase]):
20622094
20632095 Returns:
2064 defer.Deferred[None]
2096 defer.Deferred[EventContext]: updated context
20652097 """
20662098 event_auth_events = set(event.auth_event_ids())
20672099
21002132 # The other side isn't around or doesn't implement the
21012133 # endpoint, so lets just bail out.
21022134 logger.info("Failed to get event auth from remote: %s", e)
2103 return
2135 return context
21042136
21052137 seen_remotes = yield self.store.have_seen_events(
21062138 [e.event_id for e in remote_auth_chain]
21412173
21422174 if event.internal_metadata.is_outlier():
21432175 logger.info("Skipping auth_event fetch for outlier")
2144 return
2176 return context
21452177
21462178 # FIXME: Assumes we have and stored all the state for all the
21472179 # prev_events
21502182 )
21512183
21522184 if not different_auth:
2153 return
2185 return context
21542186
21552187 logger.info(
21562188 "auth_events refers to events which are not in our calculated auth "
21972229
21982230 auth_events.update(new_state)
21992231
2200 yield self._update_context_for_auth_events(
2232 context = yield self._update_context_for_auth_events(
22012233 event, context, auth_events, event_key
22022234 )
2235
2236 return context
22032237
22042238 @defer.inlineCallbacks
22052239 def _update_context_for_auth_events(self, event, context, auth_events, event_key):
22092243 Args:
22102244 event (Event): The event we're handling the context for
22112245
2212 context (synapse.events.snapshot.EventContext): event context
2213 to be updated
2246 context (synapse.events.snapshot.EventContext): initial event context
22142247
22152248 auth_events (dict[(str, str)->str]): Events to update in the event
22162249 context.
22172250
22182251 event_key ((str, str)): (type, state_key) for the current event.
22192252 this will not be included in the current_state in the context.
2253
2254 Returns:
2255 Deferred[EventContext]: new event context
22202256 """
22212257 state_updates = {
22222258 k: a.event_id for k, a in iteritems(auth_events) if k != event_key
22332269
22342270 # create a new state group as a delta from the existing one.
22352271 prev_group = context.state_group
2236 state_group = yield self.store.store_state_group(
2272 state_group = yield self.state_store.store_state_group(
22372273 event.event_id,
22382274 event.room_id,
22392275 prev_group=prev_group,
22412277 current_state_ids=current_state_ids,
22422278 )
22432279
2244 yield context.update_state(
2280 return EventContext.with_state(
22452281 state_group=state_group,
2282 state_group_before_event=context.state_group_before_event,
22462283 current_state_ids=current_state_ids,
22472284 prev_state_ids=prev_state_ids,
22482285 prev_group=prev_group,
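EventContext.with_state replaces the old in-place context.update_state: instead of mutating the context, a fresh one is built from the new state group. A hedged sketch of that immutable-update style with a hypothetical dataclass:

from dataclasses import dataclass, replace
from typing import Optional

@dataclass(frozen=True)
class Ctx:
    state_group: int
    prev_group: Optional[int] = None

def with_new_state_group(ctx, new_group):
    # the original context is left untouched; callers holding a
    # reference to it cannot observe the update
    return replace(ctx, state_group=new_group, prev_group=ctx.state_group)

c1 = Ctx(state_group=10)
c2 = with_new_state_group(c1, 11)
print(c1, c2)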
24302467 try:
24312468 yield self.auth.check_from_context(room_version, event, context)
24322469 except AuthError as e:
2433 logger.warn("Denying new third party invite %r because %s", event, e)
2470 logger.warning("Denying new third party invite %r because %s", event, e)
24342471 raise e
24352472
24362473 yield self._check_signature(event, context)
2474
2475 # We retrieve the room member handler here so as not to cause a cyclic dependency
24372476 member_handler = self.hs.get_room_member_handler()
24382477 yield member_handler.send_membership_event(None, event, context)
24392478 else:
24862525 try:
24872526 yield self.auth.check_from_context(room_version, event, context)
24882527 except AuthError as e:
2489 logger.warn("Denying third party invite %r because %s", event, e)
2528 logger.warning("Denying third party invite %r because %s", event, e)
24902529 raise e
24912530 yield self._check_signature(event, context)
24922531
24942533 # though the sender isn't a local user.
24952534 event.internal_metadata.send_on_behalf_of = get_domain_from_id(event.sender)
24962535
2536 # We retrieve the room member handler here so as not to cause a cyclic dependency
24972537 member_handler = self.hs.get_room_member_handler()
24982538 yield member_handler.send_membership_event(None, event, context)
24992539
26632703 backfilled=backfilled,
26642704 )
26652705 else:
2666 max_stream_id = yield self.store.persist_events(
2706 max_stream_id = yield self.storage.persistence.persist_events(
26672707 event_and_contexts, backfilled=backfilled
26682708 )
26692709
391391 try:
392392 user_profile = yield self.profile_handler.get_profile(user_id)
393393 except Exception as e:
394 logger.warn("No profile for user %s: %s", user_id, e)
394 logger.warning("No profile for user %s: %s", user_id, e)
395395 user_profile = {}
396396
397397 return {"state": "invite", "user_profile": user_profile}
271271 changed = False
272272 if e.code in (400, 404, 501):
273273 # The remote server probably doesn't support unbinding (yet)
274 logger.warn("Received %d response while unbinding threepid", e.code)
274 logger.warning("Received %d response while unbinding threepid", e.code)
275275 else:
276276 logger.error("Failed to unbind threepid on identity server: %s", e)
277277 raise SynapseError(500, "Failed to contact identity server")
402402
403403 if self.hs.config.using_identity_server_from_trusted_list:
404404 # Warn that a deprecated config option is in use
405 logger.warn(
405 logger.warning(
406406 'The config option "trust_identity_server_for_password_resets" '
407407 'has been replaced by "account_threepid_delegate". '
408408 "Please consult the sample config at docs/sample_config.yaml for "
456456
457457 if self.hs.config.using_identity_server_from_trusted_list:
458458 # Warn that a deprecated config option is in use
459 logger.warn(
459 logger.warning(
460460 'The config option "trust_identity_server_for_password_resets" '
461461 'has been replaced by "account_threepid_delegate". '
462462 "Please consult the sample config at docs/sample_config.yaml for "
4242 self.validator = EventValidator()
4343 self.snapshot_cache = SnapshotCache()
4444 self._event_serializer = hs.get_event_client_serializer()
45 self.storage = hs.get_storage()
46 self.state_store = self.storage.state
4547
4648 def snapshot_all_rooms(
4749 self,
125127
126128 tags_by_room = yield self.store.get_tags_for_user(user_id)
127129
128 account_data, account_data_by_room = (
129 yield self.store.get_account_data_for_user(user_id)
130 account_data, account_data_by_room = yield self.store.get_account_data_for_user(
131 user_id
130132 )
131133
132134 public_room_ids = yield self.store.get_public_room_ids()
168170 elif event.membership == Membership.LEAVE:
169171 room_end_token = "s%d" % (event.stream_ordering,)
170172 deferred_room_state = run_in_background(
171 self.store.get_state_for_events, [event.event_id]
173 self.state_store.get_state_for_events, [event.event_id]
172174 )
173175 deferred_room_state.addCallback(
174176 lambda states: states[event.event_id]
188190 )
189191 ).addErrback(unwrapFirstError)
190192
191 messages = yield filter_events_for_client(self.store, user_id, messages)
193 messages = yield filter_events_for_client(
194 self.storage, user_id, messages
195 )
192196
193197 start_token = now_token.copy_and_replace("room_key", token)
194198 end_token = now_token.copy_and_replace("room_key", room_end_token)
306310 def _room_initial_sync_parted(
307311 self, user_id, room_id, pagin_config, membership, member_event_id, is_peeking
308312 ):
309 room_state = yield self.store.get_state_for_events([member_event_id])
313 room_state = yield self.state_store.get_state_for_events([member_event_id])
310314
311315 room_state = room_state[member_event_id]
312316
321325 )
322326
323327 messages = yield filter_events_for_client(
324 self.store, user_id, messages, is_peeking=is_peeking
328 self.storage, user_id, messages, is_peeking=is_peeking
325329 )
326330
327331 start_token = StreamToken.START.copy_and_replace("room_key", token)
413417 )
414418
415419 messages = yield filter_events_for_client(
416 self.store, user_id, messages, is_peeking=is_peeking
420 self.storage, user_id, messages, is_peeking=is_peeking
417421 )
418422
419423 start_token = now_token.copy_and_replace("room_key", token)
5858 self.clock = hs.get_clock()
5959 self.state = hs.get_state_handler()
6060 self.store = hs.get_datastore()
61 self.storage = hs.get_storage()
62 self.state_store = self.storage.state
6163 self._event_serializer = hs.get_event_client_serializer()
6264
6365 @defer.inlineCallbacks
7375 Raises:
7476 SynapseError if something went wrong.
7577 """
76 membership, membership_event_id = yield self.auth.check_in_room_or_world_readable(
77 room_id, user_id
78 )
78 (
79 membership,
80 membership_event_id,
81 ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
7982
8083 if membership == Membership.JOIN:
8184 data = yield self.state.get_current_state(room_id, event_type, state_key)
8285 elif membership == Membership.LEAVE:
8386 key = (event_type, state_key)
84 room_state = yield self.store.get_state_for_events(
87 room_state = yield self.state_store.get_state_for_events(
8588 [membership_event_id], StateFilter.from_types([key])
8689 )
8790 data = room_state[membership_event_id].get(key)
134137 raise NotFoundError("Can't find event for token %s" % (at_token,))
135138
136139 visible_events = yield filter_events_for_client(
137 self.store, user_id, last_events
140 self.storage, user_id, last_events
138141 )
139142
140143 event = last_events[0]
141144 if visible_events:
142 room_state = yield self.store.get_state_for_events(
145 room_state = yield self.state_store.get_state_for_events(
143146 [event.event_id], state_filter=state_filter
144147 )
145148 room_state = room_state[event.event_id]
150153 % (user_id, room_id, at_token),
151154 )
152155 else:
153 membership, membership_event_id = (
154 yield self.auth.check_in_room_or_world_readable(room_id, user_id)
155 )
156 (
157 membership,
158 membership_event_id,
159 ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
156160
157161 if membership == Membership.JOIN:
158162 state_ids = yield self.store.get_filtered_current_state_ids(
160164 )
161165 room_state = yield self.store.get_events(state_ids.values())
162166 elif membership == Membership.LEAVE:
163 room_state = yield self.store.get_state_for_events(
167 room_state = yield self.state_store.get_state_for_events(
164168 [membership_event_id], state_filter=state_filter
165169 )
166170 room_state = room_state[membership_event_id]
233237 self.hs = hs
234238 self.auth = hs.get_auth()
235239 self.store = hs.get_datastore()
240 self.storage = hs.get_storage()
236241 self.state = hs.get_state_handler()
237242 self.clock = hs.get_clock()
238243 self.validator = EventValidator()
686691 try:
687692 yield self.auth.check_from_context(room_version, event, context)
688693 except AuthError as err:
689 logger.warn("Denying new event %r because %s", event, err)
694 logger.warning("Denying new event %r because %s", event, err)
690695 raise err
691696
692697 # Ensure that we can round trip before trying to persist in db
867872 if prev_state_ids:
868873 raise AuthError(403, "Changing the room create event is forbidden")
869874
870 (event_stream_id, max_stream_id) = yield self.store.persist_event(
875 event_stream_id, max_stream_id = yield self.storage.persistence.persist_event(
871876 event, context=context
872877 )
873878
6868 self.hs = hs
6969 self.auth = hs.get_auth()
7070 self.store = hs.get_datastore()
71 self.storage = hs.get_storage()
72 self.state_store = self.storage.state
7173 self.clock = hs.get_clock()
7274 self._server_name = hs.hostname
7375
124126 self._purges_in_progress_by_room.add(room_id)
125127 try:
126128 with (yield self.pagination_lock.write(room_id)):
127 yield self.store.purge_history(room_id, token, delete_local_events)
129 yield self.storage.purge_events.purge_history(
130 room_id, token, delete_local_events
131 )
128132 logger.info("[purge] complete")
129133 self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE
130134 except Exception:
167171 if joined:
168172 raise SynapseError(400, "Users are still joined to this room")
169173
170 await self.store.purge_room(room_id)
174 await self.storage.purge_events.purge_room(room_id)
171175
172176 @defer.inlineCallbacks
173177 def get_messages(
209213 source_config = pagin_config.get_source_config("room")
210214
211215 with (yield self.pagination_lock.read(room_id)):
212 membership, member_event_id = yield self.auth.check_in_room_or_world_readable(
213 room_id, user_id
214 )
216 (
217 membership,
218 member_event_id,
219 ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
215220
216221 if source_config.direction == "b":
217222 # if we're going backwards, we might need to backfill. This
254259 events = event_filter.filter(events)
255260
256261 events = yield filter_events_for_client(
257 self.store, user_id, events, is_peeking=(member_event_id is None)
262 self.storage, user_id, events, is_peeking=(member_event_id is None)
258263 )
259264
260265 if not events:
273278 (EventTypes.Member, event.sender) for event in events
274279 )
275280
276 state_ids = yield self.store.get_state_ids_for_event(
281 state_ids = yield self.state_store.get_state_ids_for_event(
277282 events[0].event_id, state_filter=state_filter
278283 )
279284
294299 }
295300
296301 if state:
297 chunk["state"] = (
298 yield self._event_serializer.serialize_events(
299 state, time_now, as_client_event=as_client_event
300 )
302 chunk["state"] = yield self._event_serializer.serialize_events(
303 state, time_now, as_client_event=as_client_event
301304 )
302305
303306 return chunk
151151 by_admin (bool): Whether this change was made by an administrator.
152152 """
153153 if not self.hs.is_mine(target_user):
154 raise SynapseError(400, "User is not hosted on this Home Server")
154 raise SynapseError(400, "User is not hosted on this homeserver")
155155
156156 if not by_admin and target_user != requester.user:
157157 raise AuthError(400, "Cannot set another user's displayname")
206206 """target_user is the user whose avatar_url is to be changed;
207207 auth_user is the user attempting to make this change."""
208208 if not self.hs.is_mine(target_user):
209 raise SynapseError(400, "User is not hosted on this Home Server")
209 raise SynapseError(400, "User is not hosted on this homeserver")
210210
211211 if not by_admin and target_user != requester.user:
212212 raise AuthError(400, "Cannot set another user's avatar_url")
230230 def on_profile_query(self, args):
231231 user = UserID.from_string(args["user_id"])
232232 if not self.hs.is_mine(user):
233 raise SynapseError(400, "User is not hosted on this Home Server")
233 raise SynapseError(400, "User is not hosted on this homeserver")
234234
235235 just_field = args.get("field", None)
236236
274274 ratelimit=False, # Try to hide that these events aren't atomic.
275275 )
276276 except Exception as e:
277 logger.warn(
277 logger.warning(
278278 "Failed to update join event for room %s - %s", room_id, str(e)
279279 )
280280
1414
1515 import logging
1616
17 from twisted.internet import defer
18
1917 from synapse.util.async_helpers import Linearizer
2018
2119 from ._base import BaseHandler
3129 self.read_marker_linearizer = Linearizer(name="read_marker")
3230 self.notifier = hs.get_notifier()
3331
34 @defer.inlineCallbacks
35 def received_client_read_marker(self, room_id, user_id, event_id):
32 async def received_client_read_marker(self, room_id, user_id, event_id):
3633 """Updates the read marker for a given user in a given room if the event ID given
3734 is ahead in the stream relative to the current read marker.
3835
4037 the read marker has changed.
4138 """
4239
43 with (yield self.read_marker_linearizer.queue((room_id, user_id))):
44 existing_read_marker = yield self.store.get_account_data_for_room_and_type(
40 with await self.read_marker_linearizer.queue((room_id, user_id)):
41 existing_read_marker = await self.store.get_account_data_for_room_and_type(
4542 user_id, room_id, "m.fully_read"
4643 )
4744
4946
5047 if existing_read_marker:
5148 # Only update if the new marker is ahead in the stream
52 should_update = yield self.store.is_event_after(
49 should_update = await self.store.is_event_after(
5350 event_id, existing_read_marker["event_id"]
5451 )
5552
5653 if should_update:
5754 content = {"event_id": event_id}
58 max_id = yield self.store.add_account_data_to_room(
55 max_id = await self.store.add_account_data_to_room(
5956 user_id, room_id, "m.fully_read", content
6057 )
6158 self.notifier.on_new_event("account_data_key", max_id, users=[user_id])
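This handler is part of the incremental port from Twisted's @defer.inlineCallbacks/yield style to native coroutines; both forms drive the same Deferreds. A runnable sketch of the equivalence (requires Twisted; FakeStore and get_marker are illustrative):

from twisted.internet import defer

class FakeStore:
    def get_marker(self):
        return defer.succeed({"event_id": "$abc"})

@defer.inlineCallbacks
def old_style(store):
    marker = yield store.get_marker()
    return marker

async def new_style(store):
    # Deferreds are awaitable, so ported code can await the same
    # storage methods the generator used to yield
    marker = await store.get_marker()
    return marker

old_style(FakeStore()).addCallback(print)            # fires synchronously here
defer.ensureDeferred(new_style(FakeStore())).addCallback(print)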
1717
1818 from synapse.handlers._base import BaseHandler
1919 from synapse.types import ReadReceipt, get_domain_from_id
20 from synapse.util.async_helpers import maybe_awaitable
2021
2122 logger = logging.getLogger(__name__)
2223
3536 self.clock = self.hs.get_clock()
3637 self.state = hs.get_state_handler()
3738
38 @defer.inlineCallbacks
39 def _received_remote_receipt(self, origin, content):
39 async def _received_remote_receipt(self, origin, content):
4040 """Called when we receive an EDU of type m.receipt from a remote HS.
4141 """
4242 receipts = []
6161 )
6262 )
6363
64 yield self._handle_new_receipts(receipts)
64 await self._handle_new_receipts(receipts)
6565
66 @defer.inlineCallbacks
67 def _handle_new_receipts(self, receipts):
66 async def _handle_new_receipts(self, receipts):
6867 """Takes a list of receipts, stores them and informs the notifier.
6968 """
7069 min_batch_id = None
7170 max_batch_id = None
7271
7372 for receipt in receipts:
74 res = yield self.store.insert_receipt(
73 res = await self.store.insert_receipt(
7574 receipt.room_id,
7675 receipt.receipt_type,
7776 receipt.user_id,
9897
9998 self.notifier.on_new_event("receipt_key", max_batch_id, rooms=affected_room_ids)
10099 # Note that the min here shouldn't be relied upon to be accurate.
101 yield self.hs.get_pusherpool().on_new_receipts(
102 min_batch_id, max_batch_id, affected_room_ids
100 await maybe_awaitable(
101 self.hs.get_pusherpool().on_new_receipts(
102 min_batch_id, max_batch_id, affected_room_ids
103 )
103104 )
104105
105106 return True
106107
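maybe_awaitable exists because, mid-migration, a callee like on_new_receipts may still return a Deferred or a plain value rather than a coroutine, so the caller normalizes before awaiting. A sketch of what such a helper does (illustrative implementation, not Synapse's exact one):

import asyncio
import inspect

async def maybe_awaitable(value):
    if inspect.isawaitable(value):
        return await value
    return value

def legacy_sync():         # old-style callee, returns a plain value
    return 3

async def modern_async():  # new-style callee
    return 4

async def main():
    print(await maybe_awaitable(legacy_sync()))
    print(await maybe_awaitable(modern_async()))

asyncio.run(main())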
107 @defer.inlineCallbacks
108 def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
108 async def received_client_receipt(self, room_id, receipt_type, user_id, event_id):
109109 """Called when a client tells us a local user has read up to the given
110110 event_id in the room.
111111 """
117117 data={"ts": int(self.clock.time_msec())},
118118 )
119119
120 is_new = yield self._handle_new_receipts([receipt])
120 is_new = await self._handle_new_receipts([receipt])
121121 if not is_new:
122122 return
123123
124 yield self.federation.send_read_receipt(receipt)
125
126 @defer.inlineCallbacks
127 def get_receipts_for_room(self, room_id, to_key):
128 """Gets all receipts for a room, upto the given key.
129 """
130 result = yield self.store.get_linearized_receipts_for_room(
131 room_id, to_key=to_key
132 )
133
134 if not result:
135 return []
136
137 return result
124 await self.federation.send_read_receipt(receipt)
138125
139126
140127 class ReceiptEventSource(object):
2323 AuthError,
2424 Codes,
2525 ConsentNotGivenError,
26 LimitExceededError,
2726 RegistrationError,
2827 SynapseError,
2928 )
167166 Raises:
168167 RegistrationError if there was a problem registering.
169168 """
169 yield self.check_registration_ratelimit(address)
170170
171171 yield self.auth.check_auth_blocking(threepid=threepid)
172172 password_hash = None
216216
217217 else:
218218 # autogen a sequential user ID
219 fail_count = 0
219220 user = None
220221 while not user:
222 # Fail after being unable to find a suitable ID a few times
223 if fail_count > 10:
224 raise SynapseError(500, "Unable to find a suitable guest user ID")
225
221226 localpart = yield self._generate_user_id()
222227 user = UserID(localpart, self.hs.hostname)
223228 user_id = user.to_string()
232237 create_profile_with_displayname=default_display_name,
233238 address=address,
234239 )
240
241 # Successfully registered
242 break
235243 except SynapseError:
236244 # if user id is taken, just generate another
237245 user = None
238246 user_id = None
247 fail_count += 1
239248
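The fail_count change turns a potentially unbounded loop into a bounded retry: collisions on autogenerated localparts are tolerated a few times, then surfaced as a 500. The same pattern in isolation (toy generate_user_id, deterministic for the demo):

import itertools

taken = {"user1", "user2"}
_counter = itertools.count(1)

def generate_user_id():
    return "user%d" % next(_counter)

def register_guest(max_attempts=10):
    for _ in range(max_attempts):
        localpart = generate_user_id()
        if localpart not in taken:
            taken.add(localpart)
            return localpart
    raise RuntimeError("Unable to find a suitable guest user ID")

print(register_guest())  # user1 and user2 collide; user3 is free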
240249 if not self.hs.config.user_consent_at_registration:
241250 yield self._auto_join_rooms(user_id)
395404 room_id = room_identifier
396405 elif RoomAlias.is_valid(room_identifier):
397406 room_alias = RoomAlias.from_string(room_identifier)
398 room_id, remote_room_hosts = (
399 yield room_member_handler.lookup_room_alias(room_alias)
407 room_id, remote_room_hosts = yield room_member_handler.lookup_room_alias(
408 room_alias
400409 )
401410 room_id = room_id.to_string()
402411 else:
411420 remote_room_hosts=remote_room_hosts,
412421 action="join",
413422 ratelimit=False,
423 )
424
425 def check_registration_ratelimit(self, address):
426 """A simple helper method to check whether the registration rate limit has been hit
427 for a given IP address.
428
429 Args:
430 address (str|None): the IP address used to perform the registration. If this is
431 None, no ratelimiting will be performed.
432
433 Raises:
434 LimitExceededError: If the rate limit has been exceeded.
435 """
436 if not address:
437 return
438
439 time_now = self.clock.time()
440
441 self.ratelimiter.ratelimit(
442 address,
443 time_now_s=time_now,
444 rate_hz=self.hs.config.rc_registration.per_second,
445 burst_count=self.hs.config.rc_registration.burst_count,
414446 )
415447
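check_registration_ratelimit centralizes the check that register_with_store used to do inline (see the deletion below), with the limiter raising on breach rather than returning an allowed/retry pair. A simplified leaky-bucket stand-in for that behaviour (not Synapse's Ratelimiter):

import time

class Ratelimiter:
    def __init__(self):
        self.actions = {}  # key -> (allowance, last_time)

    def ratelimit(self, key, rate_hz, burst_count, time_now_s=None):
        now = time_now_s if time_now_s is not None else time.time()
        allowance, last = self.actions.get(key, (burst_count, now))
        # replenish allowance for the time elapsed, capped at the burst size
        allowance = min(burst_count, allowance + (now - last) * rate_hz)
        if allowance < 1.0:
            raise RuntimeError("rate limit exceeded for %s" % key)  # stand-in for LimitExceededError
        self.actions[key] = (allowance - 1.0, now)

limiter = Ratelimiter()
limiter.ratelimit("10.0.0.1", rate_hz=0.17, burst_count=3)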
416448 def register_with_store(
445477 Returns:
446478 Deferred
447479 """
448 # Don't rate limit for app services
449 if appservice_id is None and address is not None:
450 time_now = self.clock.time()
451
452 allowed, time_allowed = self.ratelimiter.can_do_action(
453 address,
454 time_now_s=time_now,
455 rate_hz=self.hs.config.rc_registration.per_second,
456 burst_count=self.hs.config.rc_registration.burst_count,
457 )
458
459 if not allowed:
460 raise LimitExceededError(
461 retry_after_ms=int(1000 * (time_allowed - time_now))
462 )
463
464480 if self.hs.config.worker_app:
465481 return self._register_client(
466482 user_id=user_id,
613629 # And we add an email pusher for them by default, but only
614630 # if email notifications are enabled (so people don't start
615631 # getting mail spam where they weren't before if email
616 # notifs are set up on a home server)
632 # notifs are set up on a homeserver)
617633 if (
618634 self.hs.config.email_enable_notifs
619635 and self.hs.config.email_notif_for_new_users
128128 old_room_id,
129129 new_version, # args for _upgrade_room
130130 )
131
131132 return ret
132133
133134 @defer.inlineCallbacks
146147
147148 # we create and auth the tombstone event before properly creating the new
148149 # room, to check our user has perms in the old room.
149 tombstone_event, tombstone_context = (
150 yield self.event_creation_handler.create_event(
151 requester,
152 {
153 "type": EventTypes.Tombstone,
154 "state_key": "",
155 "room_id": old_room_id,
156 "sender": user_id,
157 "content": {
158 "body": "This room has been replaced",
159 "replacement_room": new_room_id,
160 },
150 (
151 tombstone_event,
152 tombstone_context,
153 ) = yield self.event_creation_handler.create_event(
154 requester,
155 {
156 "type": EventTypes.Tombstone,
157 "state_key": "",
158 "room_id": old_room_id,
159 "sender": user_id,
160 "content": {
161 "body": "This room has been replaced",
162 "replacement_room": new_room_id,
161163 },
162 token_id=requester.access_token_id,
163 )
164 },
165 token_id=requester.access_token_id,
164166 )
165167 old_room_version = yield self.store.get_room_version(old_room_id)
166168 yield self.auth.check_from_context(
187189 requester, old_room_id, new_room_id, old_room_state
188190 )
189191
190 # and finally, shut down the PLs in the old room, and update them in the new
192 # Copy over user push rules, tags and migrate room directory state
193 yield self.room_member_handler.transfer_room_state_on_room_upgrade(
194 old_room_id, new_room_id
195 )
196
197 # finally, shut down the PLs in the old room, and update them in the new
191198 # room.
192199 yield self._update_upgraded_room_pls(
193200 requester, old_room_id, new_room_id, old_room_state
821828 def __init__(self, hs):
822829 self.hs = hs
823830 self.store = hs.get_datastore()
831 self.storage = hs.get_storage()
832 self.state_store = self.storage.state
824833
825834 @defer.inlineCallbacks
826835 def get_event_context(self, user, room_id, event_id, limit, event_filter):
847856
848857 def filter_evts(events):
849858 return filter_events_for_client(
850 self.store, user.to_string(), events, is_peeking=is_peeking
859 self.storage, user.to_string(), events, is_peeking=is_peeking
851860 )
852861
853862 event = yield self.store.get_event(
889898 # first? Shouldn't we be consistent with /sync?
890899 # https://github.com/matrix-org/matrix-doc/issues/687
891900
892 state = yield self.store.get_state_for_events(
901 state = yield self.state_store.get_state_for_events(
893902 [last_event_id], state_filter=state_filter
894903 )
895904 results["state"] = list(state[last_event_id].values())
921930
922931 from_token = RoomStreamToken.parse(from_key)
923932 if from_token.topological:
924 logger.warn("Stream has topological part!!!! %r", from_key)
933 logger.warning("Stream has topological part!!!! %r", from_key)
925934 from_key = "s%s" % (from_token.stream,)
926935
927936 app_service = self.store.get_app_service_by_user_id(user.to_string())
202202 prev_member_event = yield self.store.get_event(prev_member_event_id)
203203 newly_joined = prev_member_event.membership != Membership.JOIN
204204 if newly_joined:
205 # Copy over user state if we're joining an upgraded room
206 yield self.copy_user_state_if_room_upgrade(
207 room_id, requester.user.to_string()
208 )
209205 yield self._user_joined_room(target, room_id)
210206 elif event.membership == Membership.LEAVE:
211207 if prev_member_event_id:
454450 requester, remote_room_hosts, room_id, target, content
455451 )
456452
457 # Copy over user state if this is a join on an remote upgraded room
458 yield self.copy_user_state_if_room_upgrade(
459 room_id, requester.user.to_string()
460 )
461
462453 return remote_join_response
463454
464455 elif effective_membership_state == Membership.LEAVE:
497488 return res
498489
499490 @defer.inlineCallbacks
500 def copy_user_state_if_room_upgrade(self, new_room_id, user_id):
501 """Copy user-specific information when they join a new room if that new room is the
502 result of a room upgrade
503
504 Args:
505 new_room_id (str): The ID of the room the user is joining
506 user_id (str): The ID of the user
491 def transfer_room_state_on_room_upgrade(self, old_room_id, room_id):
492 """Upon our server becoming aware of an upgraded room, either by upgrading a room
493 ourselves or joining one, we can transfer over information from the previous room.
494
495 Copies user state (tags/push rules) for every local user that was in the old
496 room, and migrates the room directory state.
497
498 Args:
499 old_room_id (str): The ID of the old room
500
501 room_id (str): The ID of the new room
507502
508503 Returns:
509504 Deferred
510505 """
511 # Check if the new room is an upgraded room
512 predecessor = yield self.store.get_room_predecessor(new_room_id)
513 if not predecessor:
514 return
506 # Find all local users that were in the old room and copy over each user's state
507 users = yield self.store.get_users_in_room(old_room_id)
508 yield self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
509
510 # Add new room to the room directory if the old room was there
511 # Remove old room from the room directory
512 old_room = yield self.store.get_room(old_room_id)
513 if old_room and old_room["is_public"]:
514 yield self.store.set_room_is_public(old_room_id, False)
515 yield self.store.set_room_is_public(room_id, True)
516
517 # Check if any groups we own contain the predecessor room
518 local_group_ids = yield self.store.get_local_groups_for_room(old_room_id)
519 for group_id in local_group_ids:
520 # Add the new room to those groups
521 yield self.store.add_room_to_group(group_id, room_id, old_room["is_public"])
522
523 # Remove the old room from those groups
524 yield self.store.remove_room_from_group(group_id, old_room_id)
525
526 @defer.inlineCallbacks
527 def copy_user_state_on_room_upgrade(self, old_room_id, new_room_id, user_ids):
528 """Copy user-specific information when they join a new room when that new room is the
529 result of a room upgrade
530
531 Args:
532 old_room_id (str): The ID of the upgraded room
533 new_room_id (str): The ID of the new room
534 user_ids (Iterable[str]): User IDs to copy state for
535
536 Returns:
537 Deferred
538 """
515539
516540 logger.debug(
517 "Found predecessor for %s: %s. Copying over room tags and push " "rules",
541 "Copying over room tags and push rules from %s to %s for users %s",
542 old_room_id,
518543 new_room_id,
519 predecessor,
520 )
521
522 # It is an upgraded room. Copy over old tags
523 yield self.copy_room_tags_and_direct_to_room(
524 predecessor["room_id"], new_room_id, user_id
525 )
526 # Copy over push rules
527 yield self.store.copy_push_rules_from_room_to_room_for_user(
528 predecessor["room_id"], new_room_id, user_id
529 )
544 user_ids,
545 )
546
547 for user_id in user_ids:
548 try:
549 # It is an upgraded room. Copy over old tags
550 yield self.copy_room_tags_and_direct_to_room(
551 old_room_id, new_room_id, user_id
552 )
553 # Copy over push rules
554 yield self.store.copy_push_rules_from_room_to_room_for_user(
555 old_room_id, new_room_id, user_id
556 )
557 except Exception:
558 logger.exception(
559 "Error copying tags and/or push rules from rooms %s to %s for user %s. "
560 "Skipping...",
561 old_room_id,
562 new_room_id,
563 user_id,
564 )
565 continue
530566
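Worth noting in the loop above: the try/except is per user, so a bad row for one user is logged and skipped rather than aborting the migration for everyone. The same design choice in isolation:

import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def copy_state(old_room, new_room, user_id):
    if user_id == "@bad:hs":
        raise ValueError("corrupt tags")

def copy_user_state_on_room_upgrade(old_room, new_room, user_ids):
    for user_id in user_ids:
        try:
            copy_state(old_room, new_room, user_id)
        except Exception:
            logger.exception(
                "Error copying state from %s to %s for %s; skipping",
                old_room, new_room, user_id,
            )
            continue

copy_user_state_on_room_upgrade("!old:hs", "!new:hs", ["@a:hs", "@bad:hs", "@b:hs"])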
531567 @defer.inlineCallbacks
532568 def send_membership_event(self, requester, event, context, ratelimit=True):
758794 if room_avatar_event:
759795 room_avatar_url = room_avatar_event.content.get("url", "")
760796
761 token, public_keys, fallback_public_key, display_name = (
762 yield self.identity_handler.ask_id_server_for_third_party_invite(
763 requester=requester,
764 id_server=id_server,
765 medium=medium,
766 address=address,
767 room_id=room_id,
768 inviter_user_id=user.to_string(),
769 room_alias=canonical_room_alias,
770 room_avatar_url=room_avatar_url,
771 room_join_rules=room_join_rules,
772 room_name=room_name,
773 inviter_display_name=inviter_display_name,
774 inviter_avatar_url=inviter_avatar_url,
775 id_access_token=id_access_token,
776 )
797 (
798 token,
799 public_keys,
800 fallback_public_key,
801 display_name,
802 ) = yield self.identity_handler.ask_id_server_for_third_party_invite(
803 requester=requester,
804 id_server=id_server,
805 medium=medium,
806 address=address,
807 room_id=room_id,
808 inviter_user_id=user.to_string(),
809 room_alias=canonical_room_alias,
810 room_avatar_url=room_avatar_url,
811 room_join_rules=room_join_rules,
812 room_name=room_name,
813 inviter_display_name=inviter_display_name,
814 inviter_avatar_url=inviter_avatar_url,
815 id_access_token=id_access_token,
777816 )
778817
779818 yield self.event_creation_handler.create_and_send_nonmember_event(
3434 def __init__(self, hs):
3535 super(SearchHandler, self).__init__(hs)
3636 self._event_serializer = hs.get_event_client_serializer()
37 self.storage = hs.get_storage()
38 self.state_store = self.storage.state
3739
3840 @defer.inlineCallbacks
3941 def get_old_rooms_from_upgraded_room(self, room_id):
220222 filtered_events = search_filter.filter([r["event"] for r in results])
221223
222224 events = yield filter_events_for_client(
223 self.store, user.to_string(), filtered_events
225 self.storage, user.to_string(), filtered_events
224226 )
225227
226228 events.sort(key=lambda e: -rank_map[e.event_id])
270272 filtered_events = search_filter.filter([r["event"] for r in results])
271273
272274 events = yield filter_events_for_client(
273 self.store, user.to_string(), filtered_events
275 self.storage, user.to_string(), filtered_events
274276 )
275277
276278 room_events.extend(events)
339341 )
340342
341343 res["events_before"] = yield filter_events_for_client(
342 self.store, user.to_string(), res["events_before"]
344 self.storage, user.to_string(), res["events_before"]
343345 )
344346
345347 res["events_after"] = yield filter_events_for_client(
346 self.store, user.to_string(), res["events_after"]
348 self.storage, user.to_string(), res["events_after"]
347349 )
348350
349351 res["start"] = now_token.copy_and_replace(
371373 [(EventTypes.Member, sender) for sender in senders]
372374 )
373375
374 state = yield self.store.get_state_for_event(
376 state = yield self.state_store.get_state_for_event(
375377 last_event_id, state_filter
376378 )
377379
393395 time_now = self.clock.time_msec()
394396
395397 for context in contexts.values():
396 context["events_before"] = (
397 yield self._event_serializer.serialize_events(
398 context["events_before"], time_now
399 )
400 )
401 context["events_after"] = (
402 yield self._event_serializer.serialize_events(
403 context["events_after"], time_now
404 )
398 context["events_before"] = yield self._event_serializer.serialize_events(
399 context["events_before"], time_now
400 )
401 context["events_after"] = yield self._event_serializer.serialize_events(
402 context["events_after"], time_now
405403 )
406404
407405 state_results = {}
107107 user_deltas = {}
108108
109109 # Then count deltas for total_events and total_event_bytes.
110 room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes(
110 (
111 room_count,
112 user_count,
113 ) = yield self.store.get_changes_room_total_events_and_bytes(
111114 self.pos, max_pos
112115 )
113116
229229 self.response_cache = ResponseCache(hs, "sync")
230230 self.state = hs.get_state_handler()
231231 self.auth = hs.get_auth()
232 self.storage = hs.get_storage()
233 self.state_store = self.storage.state
232234
233235 # ExpiringCache((User, Device)) -> LruCache(state_key => event_id)
234236 self.lazy_loaded_members_cache = ExpiringCache(
416418 current_state_ids = frozenset(itervalues(current_state_ids))
417419
418420 recents = yield filter_events_for_client(
419 self.store,
421 self.storage,
420422 sync_config.user.to_string(),
421423 recents,
422424 always_include_ids=current_state_ids,
469471 current_state_ids = frozenset(itervalues(current_state_ids))
470472
471473 loaded_recents = yield filter_events_for_client(
472 self.store,
474 self.storage,
473475 sync_config.user.to_string(),
474476 loaded_recents,
475477 always_include_ids=current_state_ids,
508510 Returns:
509511 A Deferred map from ((type, state_key)->Event)
510512 """
511 state_ids = yield self.store.get_state_ids_for_event(
513 state_ids = yield self.state_store.get_state_ids_for_event(
512514 event.event_id, state_filter=state_filter
513515 )
514516 if event.is_state():
579581 return None
580582
581583 last_event = last_events[-1]
582 state_ids = yield self.store.get_state_ids_for_event(
584 state_ids = yield self.state_store.get_state_ids_for_event(
583585 last_event.event_id,
584586 state_filter=StateFilter.from_types(
585587 [(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")]
756758
757759 if full_state:
758760 if batch:
759 current_state_ids = yield self.store.get_state_ids_for_event(
761 current_state_ids = yield self.state_store.get_state_ids_for_event(
760762 batch.events[-1].event_id, state_filter=state_filter
761763 )
762764
763 state_ids = yield self.store.get_state_ids_for_event(
765 state_ids = yield self.state_store.get_state_ids_for_event(
764766 batch.events[0].event_id, state_filter=state_filter
765767 )
766768
780782 )
781783 elif batch.limited:
782784 if batch:
783 state_at_timeline_start = yield self.store.get_state_ids_for_event(
785 state_at_timeline_start = yield self.state_store.get_state_ids_for_event(
784786 batch.events[0].event_id, state_filter=state_filter
785787 )
786788 else:
809811 )
810812
811813 if batch:
812 current_state_ids = yield self.store.get_state_ids_for_event(
814 current_state_ids = yield self.state_store.get_state_ids_for_event(
813815 batch.events[-1].event_id, state_filter=state_filter
814816 )
815817 else:
840842 # So we fish out all the member events corresponding to the
841843 # timeline here, and then dedupe any redundant ones below.
842844
843 state_ids = yield self.store.get_state_ids_for_event(
845 state_ids = yield self.state_store.get_state_ids_for_event(
844846 batch.events[0].event_id,
845847 # we only want members!
846848 state_filter=StateFilter.from_types(
12031205 since_token = sync_result_builder.since_token
12041206
12051207 if since_token and not sync_result_builder.full_state:
1206 account_data, account_data_by_room = (
1207 yield self.store.get_updated_account_data_for_user(
1208 user_id, since_token.account_data_key
1209 )
1208 (
1209 account_data,
1210 account_data_by_room,
1211 ) = yield self.store.get_updated_account_data_for_user(
1212 user_id, since_token.account_data_key
12101213 )
12111214
12121215 push_rules_changed = yield self.store.have_push_rules_changed_for_user(
12181221 sync_config.user
12191222 )
12201223 else:
1221 account_data, account_data_by_room = (
1222 yield self.store.get_account_data_for_user(sync_config.user.to_string())
1223 )
1224 (
1225 account_data,
1226 account_data_by_room,
1227 ) = yield self.store.get_account_data_for_user(sync_config.user.to_string())
12241228
12251229 account_data["m.push_rules"] = yield self.push_rules_for_user(
12261230 sync_config.user
119119 auth_user_id = auth_user.to_string()
120120
121121 if not self.is_mine_id(target_user_id):
122 raise SynapseError(400, "User is not hosted on this Home Server")
122 raise SynapseError(400, "User is not hosted on this homeserver")
123123
124124 if target_user_id != auth_user_id:
125125 raise AuthError(400, "Cannot set another user's typing state")
149149 auth_user_id = auth_user.to_string()
150150
151151 if not self.is_mine_id(target_user_id):
152 raise SynapseError(400, "User is not hosted on this Home Server")
152 raise SynapseError(400, "User is not hosted on this homeserver")
153153
154154 if target_user_id != auth_user_id:
155155 raise AuthError(400, "Cannot set another user's typing state")
8080 def __init__(self, hs):
8181 super().__init__(hs)
8282 self._enabled = bool(hs.config.recaptcha_private_key)
83 self._http_client = hs.get_simple_http_client()
83 self._http_client = hs.get_proxied_http_client()
8484 self._url = hs.config.recaptcha_siteverify_api
8585 self._secret = hs.config.recaptcha_private_key
8686
4444 cancelled_to_request_timed_out_error,
4545 redact_uri,
4646 )
47 from synapse.http.proxyagent import ProxyAgent
4748 from synapse.logging.context import make_deferred_yieldable
4849 from synapse.logging.opentracing import set_tag, start_active_span, tags
4950 from synapse.util.async_helpers import timeout_deferred
182183 using HTTP in Matrix
183184 """
184185
185 def __init__(self, hs, treq_args={}, ip_whitelist=None, ip_blacklist=None):
186 def __init__(
187 self,
188 hs,
189 treq_args={},
190 ip_whitelist=None,
191 ip_blacklist=None,
192 http_proxy=None,
193 https_proxy=None,
194 ):
186195 """
187196 Args:
188197 hs (synapse.server.HomeServer)
191200 we may not request.
192201 ip_whitelist (netaddr.IPSet): The whitelisted IP addresses, that we can
193202 request if it were otherwise caught in a blacklist.
203 http_proxy (bytes): proxy server to use for http connections. host[:port]
204 https_proxy (bytes): proxy server to use for https connections. host[:port]
194205 """
195206 self.hs = hs
196207
235246 # The default context factory in Twisted 14.0.0 (which we require) is
236247 # BrowserLikePolicyForHTTPS which will do regular cert validation
237248 # 'like a browser'
238 self.agent = Agent(
249 self.agent = ProxyAgent(
239250 self.reactor,
240251 connectTimeout=15,
241252 contextFactory=self.hs.get_http_client_context_factory(),
242253 pool=pool,
254 http_proxy=http_proxy,
255 https_proxy=https_proxy,
243256 )
244257
245258 if self._ip_blacklist:
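How the new http_proxy/https_proxy parameters are fed is not shown in this hunk; a plausible call site (hedged: the plumbing is assumed, values as bytes in host[:port] form, os.environb is POSIX-only) reads the conventional environment variables:

import os

http_proxy = os.environb.get(b"http_proxy")    # e.g. b"proxy.example.com:8888"
https_proxy = os.environb.get(b"https_proxy")

# client = SimpleHttpClient(hs, http_proxy=http_proxy, https_proxy=https_proxy)
print(http_proxy, https_proxy)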
534547 b"Content-Length" in resp_headers
535548 and int(resp_headers[b"Content-Length"][0]) > max_size
536549 ):
537 logger.warn("Requested URL is too large > %r bytes" % (self.max_size,))
550 logger.warning("Requested URL is too large > %r bytes" % (self.max_size,))
538551 raise SynapseError(
539552 502,
540553 "Requested file is too large > %r bytes" % (self.max_size,),
542555 )
543556
544557 if response.code > 299:
545 logger.warn("Got %d when downloading %s" % (response.code, url))
558 logger.warning("Got %d when downloading %s" % (response.code, url))
546559 raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)
547560
548561 # TODO: if our Content-Type is HTML or something, just read the first
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import logging
16
17 from zope.interface import implementer
18
19 from twisted.internet import defer, protocol
20 from twisted.internet.error import ConnectError
21 from twisted.internet.interfaces import IStreamClientEndpoint
22 from twisted.internet.protocol import connectionDone
23 from twisted.web import http
24
25 logger = logging.getLogger(__name__)
26
27
28 class ProxyConnectError(ConnectError):
29 pass
30
31
32 @implementer(IStreamClientEndpoint)
33 class HTTPConnectProxyEndpoint(object):
34 """An Endpoint implementation which will send a CONNECT request to an http proxy
35
36 Wraps an existing HostnameEndpoint for the proxy.
37
38 When we get the connect() request from the connection pool (via the TLS wrapper),
39 we'll first connect to the proxy endpoint with a ProtocolFactory which will make the
40 CONNECT request. Once that completes, we invoke the protocolFactory which was passed
41 in.
42
43 Args:
44 reactor: the Twisted reactor to use for the connection
45 proxy_endpoint (IStreamClientEndpoint): the endpoint to use to connect to the
46 proxy
47 host (bytes): hostname that we want to CONNECT to
48 port (int): port that we want to connect to
49 """
50
51 def __init__(self, reactor, proxy_endpoint, host, port):
52 self._reactor = reactor
53 self._proxy_endpoint = proxy_endpoint
54 self._host = host
55 self._port = port
56
57 def __repr__(self):
58 return "<HTTPConnectProxyEndpoint %s>" % (self._proxy_endpoint,)
59
60 def connect(self, protocolFactory):
61 f = HTTPProxiedClientFactory(self._host, self._port, protocolFactory)
62 d = self._proxy_endpoint.connect(f)
63 # once the tcp socket connects successfully, we need to wait for the
64 # CONNECT to complete.
65 d.addCallback(lambda conn: f.on_connection)
66 return d
67
68
69 class HTTPProxiedClientFactory(protocol.ClientFactory):
70 """ClientFactory wrapper that triggers an HTTP proxy CONNECT on connect.
71
72 Once the CONNECT completes, invokes the original ClientFactory to build the
73 HTTP Protocol object and run the rest of the connection.
74
75 Args:
76 dst_host (bytes): hostname that we want to CONNECT to
77 dst_port (int): port that we want to connect to
78 wrapped_factory (protocol.ClientFactory): The original Factory
79 """
80
81 def __init__(self, dst_host, dst_port, wrapped_factory):
82 self.dst_host = dst_host
83 self.dst_port = dst_port
84 self.wrapped_factory = wrapped_factory
85 self.on_connection = defer.Deferred()
86
87 def startedConnecting(self, connector):
88 return self.wrapped_factory.startedConnecting(connector)
89
90 def buildProtocol(self, addr):
91 wrapped_protocol = self.wrapped_factory.buildProtocol(addr)
92
93 return HTTPConnectProtocol(
94 self.dst_host, self.dst_port, wrapped_protocol, self.on_connection
95 )
96
97 def clientConnectionFailed(self, connector, reason):
98 logger.debug("Connection to proxy failed: %s", reason)
99 if not self.on_connection.called:
100 self.on_connection.errback(reason)
101 return self.wrapped_factory.clientConnectionFailed(connector, reason)
102
103 def clientConnectionLost(self, connector, reason):
104 logger.debug("Connection to proxy lost: %s", reason)
105 if not self.on_connection.called:
106 self.on_connection.errback(reason)
107 return self.wrapped_factory.clientConnectionLost(connector, reason)
108
109
110 class HTTPConnectProtocol(protocol.Protocol):
111 """Protocol that wraps an existing Protocol to do a CONNECT handshake at connect
112
113 Args:
114 host (bytes): The original HTTP(s) hostname or IPv4 or IPv6 address literal
115 to put in the CONNECT request
116
117 port (int): The original HTTP(s) port to put in the CONNECT request
118
119 wrapped_protocol (interfaces.IProtocol): the original protocol (probably
120 HTTPChannel or TLSMemoryBIOProtocol, but could be anything really)
121
122 connected_deferred (Deferred): a Deferred which will be callbacked with
123 wrapped_protocol when the CONNECT completes
124 """
125
126 def __init__(self, host, port, wrapped_protocol, connected_deferred):
127 self.host = host
128 self.port = port
129 self.wrapped_protocol = wrapped_protocol
130 self.connected_deferred = connected_deferred
131 self.http_setup_client = HTTPConnectSetupClient(self.host, self.port)
132 self.http_setup_client.on_connected.addCallback(self.proxyConnected)
133
134 def connectionMade(self):
135 self.http_setup_client.makeConnection(self.transport)
136
137 def connectionLost(self, reason=connectionDone):
138 if self.wrapped_protocol.connected:
139 self.wrapped_protocol.connectionLost(reason)
140
141 self.http_setup_client.connectionLost(reason)
142
143 if not self.connected_deferred.called:
144 self.connected_deferred.errback(reason)
145
146 def proxyConnected(self, _):
147 self.wrapped_protocol.makeConnection(self.transport)
148
149 self.connected_deferred.callback(self.wrapped_protocol)
150
151 # Get any pending data from the http buf and forward it to the original protocol
152 buf = self.http_setup_client.clearLineBuffer()
153 if buf:
154 self.wrapped_protocol.dataReceived(buf)
155
156 def dataReceived(self, data):
157 # if we've set up the HTTP protocol, we can send the data there
158 if self.wrapped_protocol.connected:
159 return self.wrapped_protocol.dataReceived(data)
160
161 # otherwise, we must still be setting up the connection: send the data to the
162 # setup client
163 return self.http_setup_client.dataReceived(data)
164
165
166 class HTTPConnectSetupClient(http.HTTPClient):
167 """HTTPClient protocol to send a CONNECT message for proxies and read the response.
168
169 Args:
170 host (bytes): The hostname to send in the CONNECT message
171 port (int): The port to send in the CONNECT message
172 """
173
174 def __init__(self, host, port):
175 self.host = host
176 self.port = port
177 self.on_connected = defer.Deferred()
178
179 def connectionMade(self):
180 logger.debug("Connected to proxy, sending CONNECT")
181 self.sendCommand(b"CONNECT", b"%s:%d" % (self.host, self.port))
182 self.endHeaders()
183
184 def handleStatus(self, version, status, message):
185 logger.debug("Got Status: %s %s %s", status, message, version)
186 if status != b"200":
187 raise ProxyConnectError("Unexpected status on CONNECT: %s" % status)
188
189 def handleEndHeaders(self):
190 logger.debug("End Headers")
191 self.on_connected.callback(None)
192
193 def handleResponse(self, body):
194 pass
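
Putting the new module together, a sketch of the intended flow (proxy.example.com:8888 and matrix.org are placeholders, not values from the diff): dial the proxy, tunnel with CONNECT, then start TLS inside the tunnel, just as ProxyAgent does in the next file.

    from twisted.internet import reactor
    from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
    from twisted.web.client import BrowserLikePolicyForHTTPS

    from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint

    # TCP endpoint for the proxy itself.
    proxy_endpoint = HostnameEndpoint(reactor, "proxy.example.com", 8888)

    # CONNECT through the proxy to the real destination.
    tunnel = HTTPConnectProxyEndpoint(reactor, proxy_endpoint, b"matrix.org", 443)

    # Run TLS over the tunnelled connection, as ProxyAgent does for https URIs.
    tls_creator = BrowserLikePolicyForHTTPS().creatorForNetloc(b"matrix.org", 443)
    endpoint = wrapClientTLS(tls_creator, tunnel)
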
147147 # Try something in the cache, else reraise
148148 cache_entry = self._cache.get(service_name, None)
149149 if cache_entry:
150 logger.warn(
150 logger.warning(
151151 "Failed to resolve %r, falling back to cache. %r", service_name, e
152152 )
153153 return list(cache_entry)
148148
149149 body = yield make_deferred_yieldable(d)
150150 except Exception as e:
151 logger.warn(
151 logger.warning(
152152 "{%s} [%s] Error reading response: %s",
153153 request.txn_id,
154154 request.destination,
456456 except Exception as e:
457457 # Eh, we're already going to raise an exception so lets
458458 # ignore if this fails.
459 logger.warn(
459 logger.warning(
460460 "{%s} [%s] Failed to get error response: %s %s: %s",
461461 request.txn_id,
462462 request.destination,
477477
478478 break
479479 except RequestSendFailed as e:
480 logger.warn(
480 logger.warning(
481481 "{%s} [%s] Request failed: %s %s: %s",
482482 request.txn_id,
483483 request.destination,
512512 raise
513513
514514 except Exception as e:
515 logger.warn(
515 logger.warning(
516516 "{%s} [%s] Request failed: %s %s: %s",
517517 request.txn_id,
518518 request.destination,
529529 """
530530 Builds the Authorization headers for a federation request
531531 Args:
532 destination (bytes|None): The destination home server of the request.
532 destination (bytes|None): The destination homeserver of the request.
533533 May be None if the destination is an identity server, in which case
534534 destination_is must be non-None.
535535 method (bytes): The HTTP method of the request
888888 d.addTimeout(self.default_timeout, self.reactor)
889889 length = yield make_deferred_yieldable(d)
890890 except Exception as e:
891 logger.warn(
891 logger.warning(
892892 "{%s} [%s] Error reading response: %s",
893893 request.txn_id,
894894 request.destination,
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15 import re
16
17 from zope.interface import implementer
18
19 from twisted.internet import defer
20 from twisted.internet.endpoints import HostnameEndpoint, wrapClientTLS
21 from twisted.python.failure import Failure
22 from twisted.web.client import URI, BrowserLikePolicyForHTTPS, _AgentBase
23 from twisted.web.error import SchemeNotSupported
24 from twisted.web.iweb import IAgent
25
26 from synapse.http.connectproxyclient import HTTPConnectProxyEndpoint
27
28 logger = logging.getLogger(__name__)
29
30 _VALID_URI = re.compile(br"\A[\x21-\x7e]+\Z")
31
32
33 @implementer(IAgent)
34 class ProxyAgent(_AgentBase):
35 """An Agent implementation which will use an HTTP proxy if one was requested
36
37 Args:
38 reactor: twisted reactor to use for outgoing
39 connections.
40
41 contextFactory (IPolicyForHTTPS): A factory for TLS contexts, to control the
42 verification parameters of OpenSSL. The default is to use a
43 `BrowserLikePolicyForHTTPS`, so unless you have special
44 requirements you can leave this as-is.
45
46 connectTimeout (float): The amount of time that this Agent will wait
47 for the peer to accept a connection.
48
49 bindAddress (bytes): The local address for client sockets to bind to.
50
51 pool (HTTPConnectionPool|None): connection pool to be used. If None, a
52 non-persistent pool instance will be created.
53 """
54
55 def __init__(
56 self,
57 reactor,
58 contextFactory=BrowserLikePolicyForHTTPS(),
59 connectTimeout=None,
60 bindAddress=None,
61 pool=None,
62 http_proxy=None,
63 https_proxy=None,
64 ):
65 _AgentBase.__init__(self, reactor, pool)
66
67 self._endpoint_kwargs = {}
68 if connectTimeout is not None:
69 self._endpoint_kwargs["timeout"] = connectTimeout
70 if bindAddress is not None:
71 self._endpoint_kwargs["bindAddress"] = bindAddress
72
73 self.http_proxy_endpoint = _http_proxy_endpoint(
74 http_proxy, reactor, **self._endpoint_kwargs
75 )
76
77 self.https_proxy_endpoint = _http_proxy_endpoint(
78 https_proxy, reactor, **self._endpoint_kwargs
79 )
80
81 self._policy_for_https = contextFactory
82 self._reactor = reactor
83
84 def request(self, method, uri, headers=None, bodyProducer=None):
85 """
86 Issue a request to the server indicated by the given uri.
87
88 Supports `http` and `https` schemes.
89
90 An existing connection from the connection pool may be used or a new one may be
91 created.
92
93 See also: twisted.web.iweb.IAgent.request
94
95 Args:
96 method (bytes): The request method to use, such as `GET`, `POST`, etc
97
98 uri (bytes): The location of the resource to request.
99
100 headers (Headers|None): Extra headers to send with the request
101
102 bodyProducer (IBodyProducer|None): An object which can generate bytes to
103 make up the body of this request (for example, the properly encoded
104 contents of a file for a file upload). Or, None if the request is to
105 have no body.
106
107 Returns:
108 Deferred[IResponse]: completes when the header of the response has
109 been received (regardless of the response status code).
110 """
111 uri = uri.strip()
112 if not _VALID_URI.match(uri):
113 raise ValueError("Invalid URI {!r}".format(uri))
114
115 parsed_uri = URI.fromBytes(uri)
116 pool_key = (parsed_uri.scheme, parsed_uri.host, parsed_uri.port)
117 request_path = parsed_uri.originForm
118
119 if parsed_uri.scheme == b"http" and self.http_proxy_endpoint:
120 # Cache *all* connections under the same key, since we are only
121 # connecting to a single destination, the proxy:
122 pool_key = ("http-proxy", self.http_proxy_endpoint)
123 endpoint = self.http_proxy_endpoint
124 request_path = uri
125 elif parsed_uri.scheme == b"https" and self.https_proxy_endpoint:
126 endpoint = HTTPConnectProxyEndpoint(
127 self._reactor,
128 self.https_proxy_endpoint,
129 parsed_uri.host,
130 parsed_uri.port,
131 )
132 else:
133 # not using a proxy
134 endpoint = HostnameEndpoint(
135 self._reactor, parsed_uri.host, parsed_uri.port, **self._endpoint_kwargs
136 )
137
138 logger.debug("Requesting %s via %s", uri, endpoint)
139
140 if parsed_uri.scheme == b"https":
141 tls_connection_creator = self._policy_for_https.creatorForNetloc(
142 parsed_uri.host, parsed_uri.port
143 )
144 endpoint = wrapClientTLS(tls_connection_creator, endpoint)
145 elif parsed_uri.scheme == b"http":
146 pass
147 else:
148 return defer.fail(
149 Failure(
150 SchemeNotSupported("Unsupported scheme: %r" % (parsed_uri.scheme,))
151 )
152 )
153
154 return self._requestWithEndpoint(
155 pool_key, endpoint, method, parsed_uri, headers, bodyProducer, request_path
156 )
157
158
159 def _http_proxy_endpoint(proxy, reactor, **kwargs):
160 """Parses an http proxy setting and returns an endpoint for the proxy
161
162 Args:
163 proxy (bytes|None): the proxy setting
164 reactor: reactor to be used to connect to the proxy
165 kwargs: other args to be passed to HostnameEndpoint
166
167 Returns:
168 interfaces.IStreamClientEndpoint|None: endpoint to use to connect to the proxy,
169 or None
170 """
171 if proxy is None:
172 return None
173
174 # currently we only support hostname:port. Some apps also support
175 # protocol://<host>[:port], which allows a way of requiring a TLS connection to the
176 # proxy.
177
178 host, port = parse_host_port(proxy, default_port=1080)
179 return HostnameEndpoint(reactor, host, port, **kwargs)
180
181
182 def parse_host_port(hostport, default_port=None):
183 # could have sworn we had one of these somewhere else...
184 if b":" in hostport:
185 host, port = hostport.rsplit(b":", 1)
186 try:
187 port = int(port)
188 return host, port
189 except ValueError:
190 # the thing after the : wasn't a valid port; presumably this is an
191 # IPv6 address.
192 pass
193
194 return hostport, default_port
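
A sketch of the agent in use (proxy.example.com:8888 is a placeholder): plain-http requests are sent whole-URI to the proxy and pooled under a single key, while https requests are tunnelled through the CONNECT endpoint above; parse_host_port behaves as the asserts suggest.

    from twisted.internet import reactor

    from synapse.http.proxyagent import ProxyAgent, parse_host_port

    agent = ProxyAgent(
        reactor,
        http_proxy=b"proxy.example.com:8888",
        https_proxy=b"proxy.example.com:8888",
    )
    # http: the full URI goes in the request line, straight to the proxy.
    d = agent.request(b"GET", b"http://example.com/page")
    # https: a CONNECT tunnel is set up first, then TLS runs inside it.
    d2 = agent.request(b"GET", b"https://example.com/page")

    assert parse_host_port(b"proxy.example.com:8888") == (b"proxy.example.com", 8888)
    # An IPv6 literal whose last group is not a decimal number falls back
    # to the default port:
    assert parse_host_port(b"2001:db8::beef", default_port=1080) == (b"2001:db8::beef", 1080)
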
169169 tag = context.tag
170170
171171 if context != self.start_context:
172 logger.warn(
172 logger.warning(
173173 "Context have unexpectedly changed %r, %r",
174174 context,
175175 self.start_context,
453453 # the Deferred fires, but since the flag is RIGHT THERE it seems like
454454 # a waste.
455455 if request._disconnected:
456 logger.warn(
456 logger.warning(
457457 "Not sending response to request %s, already disconnected.", request
458458 )
459459 return
218218 try:
219219 content_unicode = content_bytes.decode("utf8")
220220 except UnicodeDecodeError:
221 logger.warn("Unable to decode UTF-8")
221 logger.warning("Unable to decode UTF-8")
222222 raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
223223
224224 try:
225225 content = json.loads(content_unicode)
226226 except Exception as e:
227 logger.warn("Unable to parse JSON: %s", e)
227 logger.warning("Unable to parse JSON: %s", e)
228228 raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)
229229
230230 return content
198198 # It's useful to log it here so that we can get an idea of when
199199 # the client disconnects.
200200 with PreserveLoggingContext(self.logcontext):
201 logger.warn(
201 logger.warning(
202202 "Error processing request %r: %s %s", self, reason.type, reason.value
203203 )
204204
304304 try:
305305 self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
306306 except Exception as e:
307 logger.warn("Failed to stop metrics: %r", e)
307 logger.warning("Failed to stop metrics: %r", e)
308308
309309
310310 class XForwardedForRequest(SynapseRequest):
184184
185185
186186 def parse_drain_configs(
187 drains: dict
187 drains: dict,
188188 ) -> typing.Generator[DrainConfiguration, None, None]:
189189 """
190190 Parse the drain configurations.
152152 An IObserver that writes JSON logs to a TCP target.
153153
154154 Args:
155 hs (HomeServer): The Homeserver that is being logged for.
155 hs (HomeServer): The homeserver that is being logged for.
156156 host: The host of the logging target.
157157 port: The logging target's port.
158158 metadata: Metadata to be added to each log entry.
293293 """Enters this logging context into thread local storage"""
294294 old_context = self.set_current_context(self)
295295 if self.previous_context != old_context:
296 logger.warn(
296 logger.warning(
297297 "Expected previous context %r, found %r",
298298 self.previous_context,
299299 old_context,
158158 self.room_to_user_streams = {}
159159
160160 self.hs = hs
161 self.storage = hs.get_storage()
161162 self.event_sources = hs.get_event_sources()
162163 self.store = hs.get_datastore()
163164 self.pending_new_room_events = []
424425
425426 if name == "room":
426427 new_events = yield filter_events_for_client(
427 self.store, user.to_string(), new_events, is_peeking=is_peeking
428 self.storage,
429 user.to_string(),
430 new_events,
431 is_peeking=is_peeking,
428432 )
429433 elif name == "presence":
430434 now = self.clock.time_msec()
7878 dict of user_id -> push_rules
7979 """
8080 room_id = event.room_id
81 rules_for_room = self._get_rules_for_room(room_id)
81 rules_for_room = yield self._get_rules_for_room(room_id)
8282
8383 rules_by_user = yield rules_for_room.get_rules(event, context)
8484
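
The added yield above is a one-character bug fix: _get_rules_for_room evidently returns a Deferred (as Synapse's cached accessors do), so without the yield the caller would bind the Deferred itself instead of the rules object. A self-contained illustration with a stand-in accessor:

    from twisted.internet import defer

    def get_rules_for_room(room_id):
        # Stand-in for the cached accessor: always hands back a Deferred.
        return defer.succeed({"@alice:example.com": []})

    @defer.inlineCallbacks
    def evaluate(room_id):
        broken = get_rules_for_room(room_id)        # a Deferred, not the rules
        rules = yield get_rules_for_room(room_id)   # the rules dict itself
        assert not isinstance(broken, dict) and isinstance(rules, dict)
        defer.returnValue(rules)
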
148148
149149 room_members = yield self.store.get_joined_users_from_context(event, context)
150150
151 (power_levels, sender_power_level) = (
152 yield self._get_power_levels_and_sender_level(event, context)
153 )
151 (
152 power_levels,
153 sender_power_level,
154 ) = yield self._get_power_levels_and_sender_level(event, context)
154155
155156 evaluator = PushRuleEvaluatorForEvent(
156157 event, len(room_members), sender_power_level, power_levels
233233 return
234234
235235 self.last_stream_ordering = last_stream_ordering
236 pusher_still_exists = (
237 yield self.store.update_pusher_last_stream_ordering_and_success(
238 self.app_id,
239 self.email,
240 self.user_id,
241 last_stream_ordering,
242 self.clock.time_msec(),
243 )
236 pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
237 self.app_id,
238 self.email,
239 self.user_id,
240 last_stream_ordering,
241 self.clock.time_msec(),
244242 )
245243 if not pusher_still_exists:
246244 # The pusher has been deleted while we were processing, so
6363 def __init__(self, hs, pusherdict):
6464 self.hs = hs
6565 self.store = self.hs.get_datastore()
66 self.storage = self.hs.get_storage()
6667 self.clock = self.hs.get_clock()
6768 self.state_handler = self.hs.get_state_handler()
6869 self.user_id = pusherdict["user_name"]
101102 if "url" not in self.data:
102103 raise PusherConfigException("'url' required in data for HTTP pusher")
103104 self.url = self.data["url"]
104 self.http_client = hs.get_simple_http_client()
105 self.http_client = hs.get_proxied_http_client()
105106 self.data_minus_url = {}
106107 self.data_minus_url.update(self.data)
107108 del self.data_minus_url["url"]
209210 http_push_processed_counter.inc()
210211 self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
211212 self.last_stream_ordering = push_action["stream_ordering"]
212 pusher_still_exists = (
213 yield self.store.update_pusher_last_stream_ordering_and_success(
214 self.app_id,
215 self.pushkey,
216 self.user_id,
217 self.last_stream_ordering,
218 self.clock.time_msec(),
219 )
213 pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
214 self.app_id,
215 self.pushkey,
216 self.user_id,
217 self.last_stream_ordering,
218 self.clock.time_msec(),
220219 )
221220 if not pusher_still_exists:
222221 # The pusher has been deleted while we were processing, so
245244 # we really only give up so that if the URL gets
246245 # fixed, we don't suddenly deliver a load
247246 # of old notifications.
248 logger.warn(
247 logger.warning(
249248 "Giving up on a notification to user %s, " "pushkey %s",
250249 self.user_id,
251250 self.pushkey,
298297 if pk != self.pushkey:
299298 # for sanity, we only remove the pushkey if it
300299 # was the one we actually sent...
301 logger.warn(
300 logger.warning(
302301 ("Ignoring rejected pushkey %s because we" " didn't send it"),
303302 pk,
304303 )
328327 return d
329328
330329 ctx = yield push_tools.get_context_for_event(
331 self.store, self.state_handler, event, self.user_id
330 self.storage, self.state_handler, event, self.user_id
332331 )
333332
334333 d = {
118118 self.store = self.hs.get_datastore()
119119 self.macaroon_gen = self.hs.get_macaroon_generator()
120120 self.state_handler = self.hs.get_state_handler()
121 self.storage = hs.get_storage()
121122 self.app_name = app_name
122123
123124 logger.info("Created Mailer for app_name %s" % app_name)
388389 }
389390
390391 the_events = yield filter_events_for_client(
391 self.store, user_id, results["events_before"]
392 self.storage, user_id, results["events_before"]
392393 )
393394 the_events.append(notif_event)
394395
116116 pattern = UserID.from_string(user_id).localpart
117117
118118 if not pattern:
119 logger.warn("event_match condition with no pattern")
119 logger.warning("event_match condition with no pattern")
120120 return False
121121
122122 # XXX: optimisation: cache our pattern regexps
172172 regex_cache[(glob, word_boundary)] = r
173173 return r.search(value)
174174 except re.error:
175 logger.warn("Failed to parse glob to regex: %r", glob)
175 logger.warning("Failed to parse glob to regex: %r", glob)
176176 return False
177177
178178
1515 from twisted.internet import defer
1616
1717 from synapse.push.presentable_names import calculate_room_name, name_from_member_event
18 from synapse.storage import Storage
1819
1920
2021 @defer.inlineCallbacks
4243
4344
4445 @defer.inlineCallbacks
45 def get_context_for_event(store, state_handler, ev, user_id):
46 def get_context_for_event(storage: Storage, state_handler, ev, user_id):
4647 ctx = {}
4748
48 room_state_ids = yield store.get_state_ids_for_event(ev.event_id)
49 room_state_ids = yield storage.state.get_state_ids_for_event(ev.event_id)
4950
5051 # we no longer bother setting room_alias, and make room_name the
5152 # human-readable name instead, be that m.room.name, an alias or
5253 # a list of people in the room
5354 name = yield calculate_room_name(
54 store, room_state_ids, user_id, fallback_to_single_member=False
55 storage.main, room_state_ids, user_id, fallback_to_single_member=False
5556 )
5657 if name:
5758 ctx["name"] = name
5859
5960 sender_state_event_id = room_state_ids[("m.room.member", ev.sender)]
60 sender_state_event = yield store.get_event(sender_state_event_id)
61 sender_state_event = yield storage.main.get_event(sender_state_event_id)
6162 ctx["sender_display_name"] = name_from_member_event(sender_state_event)
6263
6364 return ctx
102102 # create the pusher setting last_stream_ordering to the current maximum
103103 # stream ordering in event_push_actions, so it will process
104104 # pushes from this point onwards.
105 last_stream_ordering = (
106 yield self.store.get_latest_push_action_stream_ordering()
107 )
105 last_stream_ordering = yield self.store.get_latest_push_action_stream_ordering()
108106
109107 yield self.store.add_pusher(
110108 user_id=user_id,
6060 "bcrypt>=3.1.0",
6161 "pillow>=4.3.0",
6262 "sortedcontainers>=1.4.4",
63 "psutil>=2.0.0",
6463 "pymacaroons>=0.13.0",
6564 "msgpack>=0.5.2",
6665 "phonenumbers>=8.2.0",
109109 return {}
110110
111111 @abc.abstractmethod
112 def _handle_request(self, request, **kwargs):
112 async def _handle_request(self, request, **kwargs):
113113 """Handle incoming request.
114114
115115 This is called with the request object and PATH_ARGS.
116116
117117 Returns:
118 Deferred[dict]: A JSON serialisable dict to be used as response
119 body of request.
118 tuple[int, dict]: HTTP status code and a JSON serialisable dict
119 to be used as response body of request.
120120 """
121121 pass
122122
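
_handle_request is now a coroutine, and the documented return value is a (status, body) pair. A hypothetical endpoint showing the new contract (all names here are illustrative, not from the diff):

    from synapse.http.servlet import parse_json_object_from_request
    from synapse.replication.http._base import ReplicationEndpoint

    class ExampleReplicationEndpoint(ReplicationEndpoint):
        NAME = "example"
        PATH_ARGS = ("room_id",)

        @staticmethod
        def _serialize_payload(room_id, reason):
            # room_id travels in the URL path; everything else in the body.
            return {"reason": reason}

        async def _handle_request(self, request, room_id):
            content = parse_json_object_from_request(request)
            # ... do the work on the master process ...
            return 200, {"room_id": room_id, "reason": content["reason"]}
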
179179 if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
180180 raise
181181
182 logger.warn("%s request timed out", cls.NAME)
182 logger.warning("%s request timed out", cls.NAME)
183183
184184 # If we timed out we probably don't need to worry about backing
185185 # off too much, but lets just wait a little anyway.
8181
8282 return payload
8383
84 @defer.inlineCallbacks
85 def _handle_request(self, request):
84 async def _handle_request(self, request):
8685 with Measure(self.clock, "repl_fed_send_events_parse"):
8786 content = parse_json_object_from_request(request)
8887
10099 EventType = event_type_from_format_version(format_ver)
101100 event = EventType(event_dict, internal_metadata, rejected_reason)
102101
103 context = yield EventContext.deserialize(
104 self.store, event_payload["context"]
105 )
102 context = EventContext.deserialize(self.store, event_payload["context"])
106103
107104 event_and_contexts.append((event, context))
108105
109106 logger.info("Got %d events from federation", len(event_and_contexts))
110107
111 yield self.federation_handler.persist_events_and_notify(
108 await self.federation_handler.persist_events_and_notify(
112109 event_and_contexts, backfilled
113110 )
114111
143140 def _serialize_payload(edu_type, origin, content):
144141 return {"origin": origin, "content": content}
145142
146 @defer.inlineCallbacks
147 def _handle_request(self, request, edu_type):
143 async def _handle_request(self, request, edu_type):
148144 with Measure(self.clock, "repl_fed_send_edu_parse"):
149145 content = parse_json_object_from_request(request)
150146
153149
154150 logger.info("Got %r edu from %s", edu_type, origin)
155151
156 result = yield self.registry.on_edu(edu_type, origin, edu_content)
152 result = await self.registry.on_edu(edu_type, origin, edu_content)
157153
158154 return 200, result
159155
192188 """
193189 return {"args": args}
194190
195 @defer.inlineCallbacks
196 def _handle_request(self, request, query_type):
191 async def _handle_request(self, request, query_type):
197192 with Measure(self.clock, "repl_fed_query_parse"):
198193 content = parse_json_object_from_request(request)
199194
201196
202197 logger.info("Got %r query", query_type)
203198
204 result = yield self.registry.on_query(query_type, args)
199 result = await self.registry.on_query(query_type, args)
205200
206201 return 200, result
207202
233228 """
234229 return {}
235230
236 @defer.inlineCallbacks
237 def _handle_request(self, request, room_id):
238 yield self.store.clean_room_for_join(room_id)
231 async def _handle_request(self, request, room_id):
232 await self.store.clean_room_for_join(room_id)
239233
240234 return 200, {}
241235
1313 # limitations under the License.
1414
1515 import logging
16
17 from twisted.internet import defer
1816
1917 from synapse.http.servlet import parse_json_object_from_request
2018 from synapse.replication.http._base import ReplicationEndpoint
5149 "is_guest": is_guest,
5250 }
5351
54 @defer.inlineCallbacks
55 def _handle_request(self, request, user_id):
52 async def _handle_request(self, request, user_id):
5653 content = parse_json_object_from_request(request)
5754
5855 device_id = content["device_id"]
5956 initial_display_name = content["initial_display_name"]
6057 is_guest = content["is_guest"]
6158
62 device_id, access_token = yield self.registration_handler.register_device(
59 device_id, access_token = await self.registration_handler.register_device(
6360 user_id, device_id, initial_display_name, is_guest
6461 )
6562
1414
1515 import logging
1616
17 from twisted.internet import defer
18
1917 from synapse.http.servlet import parse_json_object_from_request
2018 from synapse.replication.http._base import ReplicationEndpoint
2119 from synapse.types import Requester, UserID
6462 "content": content,
6563 }
6664
67 @defer.inlineCallbacks
68 def _handle_request(self, request, room_id, user_id):
65 async def _handle_request(self, request, room_id, user_id):
6966 content = parse_json_object_from_request(request)
7067
7168 remote_room_hosts = content["remote_room_hosts"]
7875
7976 logger.info("remote_join: %s into room: %s", user_id, room_id)
8077
81 yield self.federation_handler.do_invite_join(
78 await self.federation_handler.do_invite_join(
8279 remote_room_hosts, room_id, user_id, event_content
8380 )
8481
122119 "remote_room_hosts": remote_room_hosts,
123120 }
124121
125 @defer.inlineCallbacks
126 def _handle_request(self, request, room_id, user_id):
122 async def _handle_request(self, request, room_id, user_id):
127123 content = parse_json_object_from_request(request)
128124
129125 remote_room_hosts = content["remote_room_hosts"]
136132 logger.info("remote_reject_invite: %s out of room: %s", user_id, room_id)
137133
138134 try:
139 event = yield self.federation_handler.do_remotely_reject_invite(
135 event = await self.federation_handler.do_remotely_reject_invite(
140136 remote_room_hosts, room_id, user_id
141137 )
142138 ret = event.get_pdu_json()
147143 # The 'except' clause is very broad, but we need to
148144 # capture everything from DNS failures upwards
149145 #
150 logger.warn("Failed to reject invite: %s", e)
151
152 yield self.store.locally_reject_invite(user_id, room_id)
146 logger.warning("Failed to reject invite: %s", e)
147
148 await self.store.locally_reject_invite(user_id, room_id)
153149 ret = {}
154150
155151 return 200, ret
1313 # limitations under the License.
1414
1515 import logging
16
17 from twisted.internet import defer
1816
1917 from synapse.http.servlet import parse_json_object_from_request
2018 from synapse.replication.http._base import ReplicationEndpoint
7371 "address": address,
7472 }
7573
76 @defer.inlineCallbacks
77 def _handle_request(self, request, user_id):
74 async def _handle_request(self, request, user_id):
7875 content = parse_json_object_from_request(request)
7976
80 yield self.registration_handler.register_with_store(
77 self.registration_handler.check_registration_ratelimit(content["address"])
78
79 await self.registration_handler.register_with_store(
8180 user_id=user_id,
8281 password_hash=content["password_hash"],
8382 was_guest=content["was_guest"],
116115 """
117116 return {"auth_result": auth_result, "access_token": access_token}
118117
119 @defer.inlineCallbacks
120 def _handle_request(self, request, user_id):
118 async def _handle_request(self, request, user_id):
121119 content = parse_json_object_from_request(request)
122120
123121 auth_result = content["auth_result"]
124122 access_token = content["access_token"]
125123
126 yield self.registration_handler.post_registration_actions(
124 await self.registration_handler.post_registration_actions(
127125 user_id=user_id, auth_result=auth_result, access_token=access_token
128126 )
129127
8686
8787 return payload
8888
89 @defer.inlineCallbacks
90 def _handle_request(self, request, event_id):
89 async def _handle_request(self, request, event_id):
9190 with Measure(self.clock, "repl_send_event_parse"):
9291 content = parse_json_object_from_request(request)
9392
10099 event = EventType(event_dict, internal_metadata, rejected_reason)
101100
102101 requester = Requester.deserialize(self.store, content["requester"])
103 context = yield EventContext.deserialize(self.store, content["context"])
102 context = EventContext.deserialize(self.store, content["context"])
104103
105104 ratelimit = content["ratelimit"]
106105 extra_users = [UserID.from_string(u) for u in content["extra_users"]]
112111 "Got event to send with ID: %s into room: %s", event.event_id, event.room_id
113112 )
114113
115 yield self.event_creation_handler.persist_and_notify_client_event(
114 await self.event_creation_handler.persist_and_notify_client_event(
116115 requester, event, context, ratelimit=ratelimit, extra_users=extra_users
117116 )
118117
1313 # limitations under the License.
1414
1515 import logging
16 from typing import Dict
1617
1718 import six
1819
4344
4445 self.hs = hs
4546
46 def stream_positions(self):
47 def stream_positions(self) -> Dict[str, int]:
48 """
49 Get the current positions of all the streams this store wants to subscribe to
50
51 Returns:
52 map from stream name to the most recent update we have for
53 that stream (ie, the point we want to start replicating from)
54 """
4755 pos = {}
4856 if self._cache_id_gen:
4957 pos["caches"] = self._cache_id_gen.get_current_token()
1414
1515 from synapse.replication.slave.storage._base import BaseSlavedStore
1616 from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
17 from synapse.replication.tcp.streams._base import DeviceListsStream, UserSignatureStream
1718 from synapse.storage.data_stores.main.devices import DeviceWorkerStore
1819 from synapse.storage.data_stores.main.end_to_end_keys import EndToEndKeyWorkerStore
1920 from synapse.util.caches.stream_change_cache import StreamChangeCache
4142
4243 def stream_positions(self):
4344 result = super(SlavedDeviceStore, self).stream_positions()
44 result["device_lists"] = self._device_list_id_gen.get_current_token()
45 # The user signature stream uses the same stream ID generator as the
46 # device list stream, so set them both to the device list ID
47 # generator's current token.
48 current_token = self._device_list_id_gen.get_current_token()
49 result[DeviceListsStream.NAME] = current_token
50 result[UserSignatureStream.NAME] = current_token
4551 return result
4652
4753 def process_replication_rows(self, stream_name, token, rows):
48 if stream_name == "device_lists":
54 if stream_name == DeviceListsStream.NAME:
4955 self._device_list_id_gen.advance(token)
5056 for row in rows:
5157 self._invalidate_caches_for_devices(token, row.user_id, row.destination)
58 elif stream_name == UserSignatureStream.NAME:
59 for row in rows:
60 self._user_signature_stream_cache.entity_has_changed(row.user_id, token)
5261 return super(SlavedDeviceStore, self).process_replication_rows(
5362 stream_name, token, rows
5463 )
1515 """
1616
1717 import logging
18 from typing import Dict
1819
1920 from twisted.internet import defer
2021 from twisted.internet.protocol import ReconnectingClientFactory
22
23 from synapse.replication.slave.storage._base import BaseSlavedStore
24 from synapse.replication.tcp.protocol import (
25 AbstractReplicationClientHandler,
26 ClientReplicationStreamProtocol,
27 )
2128
2229 from .commands import (
2330 FederationAckCommand,
2633 UserIpCommand,
2734 UserSyncCommand,
2835 )
29 from .protocol import ClientReplicationStreamProtocol
3036
3137 logger = logging.getLogger(__name__)
3238
4147
4248 maxDelay = 30 # Try at least once every N seconds
4349
44 def __init__(self, hs, client_name, handler):
50 def __init__(self, hs, client_name, handler: AbstractReplicationClientHandler):
4551 self.client_name = client_name
4652 self.handler = handler
4753 self.server_name = hs.config.server_name
6773 ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
6874
6975
70 class ReplicationClientHandler(object):
76 class ReplicationClientHandler(AbstractReplicationClientHandler):
7177 """A base handler that can be passed to the ReplicationClientFactory.
7278
7379 By default proxies incoming replication data to the SlaveStore.
7480 """
7581
76 def __init__(self, store):
82 def __init__(self, store: BaseSlavedStore):
7783 self.store = store
7884
7985 # The current connection. None if we are currently (re)connecting
137143 if d:
138144 d.callback(data)
139145
140 def get_streams_to_replicate(self):
146 def get_streams_to_replicate(self) -> Dict[str, int]:
141147 """Called when a new connection has been established and we need to
142148 subscribe to streams.
143149
144 Returns a dictionary of stream name to token.
150 Returns:
151 map from stream name to the most recent update we have for
152 that stream (ie, the point we want to start replicating from)
145153 """
146154 args = self.store.stream_positions()
147155 user_account_data = args.pop("user_account_data", None)
167175 if self.connection:
168176 self.connection.send_command(cmd)
169177 else:
170 logger.warn("Queuing command as not connected: %r", cmd.NAME)
178 logger.warning("Queuing command as not connected: %r", cmd.NAME)
171179 self.pending_commands.append(cmd)
172180
173181 def send_federation_ack(self, token):
4747 > ERROR server stopping
4848 * connection closed by server *
4949 """
50
50 import abc
5151 import fcntl
5252 import logging
5353 import struct
6464 from synapse.logging.context import make_deferred_yieldable, run_in_background
6565 from synapse.metrics import LaterGauge
6666 from synapse.metrics.background_process_metrics import run_as_background_process
67 from synapse.util import Clock
6768 from synapse.util.stringutils import random_string
6869
6970 from .commands import (
248249 return handler(cmd)
249250
250251 def close(self):
251 logger.warn("[%s] Closing connection", self.id())
252 logger.warning("[%s] Closing connection", self.id())
252253 self.time_we_closed = self.clock.time_msec()
253254 self.transport.loseConnection()
254255 self.on_connection_closed()
557558 self.streamer.lost_connection(self)
558559
559560
561 class AbstractReplicationClientHandler(metaclass=abc.ABCMeta):
562 """
563 The interface for the handler that should be passed to
564 ClientReplicationStreamProtocol
565 """
566
567 @abc.abstractmethod
568 def on_rdata(self, stream_name, token, rows):
569 """Called to handle a batch of replication data with a given stream token.
570
571 Args:
572 stream_name (str): name of the replication stream for this batch of rows
573 token (int): stream token for this batch of rows
574 rows (list): a list of Stream.ROW_TYPE objects as returned by
575 Stream.parse_row.
576
577 Returns:
578 Deferred|None
579 """
580 raise NotImplementedError()
581
582 @abc.abstractmethod
583 def on_position(self, stream_name, token):
584 """Called when we get new position data."""
585 raise NotImplementedError()
586
587 @abc.abstractmethod
588 def on_sync(self, data):
589 """Called when get a new SYNC command."""
590 raise NotImplementedError()
591
592 @abc.abstractmethod
593 def get_streams_to_replicate(self):
594 """Called when a new connection has been established and we need to
595 subscribe to streams.
596
597 Returns:
598 map from stream name to the most recent update we have for
599 that stream (ie, the point we want to start replicating from)
600 """
601 raise NotImplementedError()
602
603 @abc.abstractmethod
604 def get_currently_syncing_users(self):
605 """Get the list of currently syncing users (if any). This is called
606 when a connection has been established and we need to send the
607 currently syncing users."""
608 raise NotImplementedError()
609
610 @abc.abstractmethod
611 def update_connection(self, connection):
612 """Called when a connection has been established (or lost with None).
613 """
614 raise NotImplementedError()
615
616 @abc.abstractmethod
617 def finished_connecting(self):
618 """Called when we have successfully subscribed and caught up to all
619 streams we're interested in.
620 """
621 raise NotImplementedError()
622
623
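
For reference, a minimal concrete handler satisfying the new interface (purely illustrative; the real default implementation is the ReplicationClientHandler shown earlier):

    import logging

    from synapse.replication.tcp.protocol import AbstractReplicationClientHandler

    logger = logging.getLogger(__name__)

    class LoggingReplicationHandler(AbstractReplicationClientHandler):
        """Subscribes to nothing and just logs what the connection reports."""

        connection = None

        def on_rdata(self, stream_name, token, rows):
            logger.info("%s: %d row(s) up to token %s", stream_name, len(rows), token)

        def on_position(self, stream_name, token):
            logger.info("%s is at position %s", stream_name, token)

        def on_sync(self, data):
            pass

        def get_streams_to_replicate(self):
            return {}

        def get_currently_syncing_users(self):
            return []

        def update_connection(self, connection):
            self.connection = connection

        def finished_connecting(self):
            logger.info("caught up on all requested streams")
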
560624 class ClientReplicationStreamProtocol(BaseReplicationStreamProtocol):
561625 VALID_INBOUND_COMMANDS = VALID_SERVER_COMMANDS
562626 VALID_OUTBOUND_COMMANDS = VALID_CLIENT_COMMANDS
563627
564 def __init__(self, client_name, server_name, clock, handler):
628 def __init__(
629 self,
630 client_name: str,
631 server_name: str,
632 clock: Clock,
633 handler: AbstractReplicationClientHandler,
634 ):
565635 BaseReplicationStreamProtocol.__init__(self, clock)
566636
567637 self.client_name = client_name
4444 _base.TagAccountDataStream,
4545 _base.AccountDataStream,
4646 _base.GroupServerStream,
47 _base.UserSignatureStream,
4748 )
4849 }
9494 "GroupsStreamRow",
9595 ("group_id", "user_id", "type", "content"), # str # str # str # dict
9696 )
97 UserSignatureStreamRow = namedtuple("UserSignatureStreamRow", ("user_id")) # str
9798
9899
99100 class Stream(object):
437438 self.update_function = store.get_all_groups_changes
438439
439440 super(GroupServerStream, self).__init__(hs)
441
442
443 class UserSignatureStream(Stream):
444 """A user has signed their own device with their user-signing key
445 """
446
447 NAME = "user_signature"
448 _LIMITED = False
449 ROW_TYPE = UserSignatureStreamRow
450
451 def __init__(self, hs):
452 store = hs.get_datastore()
453
454 self.current_token = store.get_device_stream_token
455 self.update_function = store.get_all_user_signature_changes_for_remotes
456
457 super(UserSignatureStream, self).__init__(hs)
1313 # See the License for the specific language governing permissions and
1414 # limitations under the License.
1515
16 import hashlib
17 import hmac
1816 import logging
1917 import platform
2018 import re
2119
22 from six import text_type
23 from six.moves import http_client
24
2520 import synapse
26 from synapse.api.constants import Membership, UserTypes
2721 from synapse.api.errors import Codes, NotFoundError, SynapseError
2822 from synapse.http.server import JsonResource
29 from synapse.http.servlet import (
30 RestServlet,
31 assert_params_in_dict,
32 parse_integer,
33 parse_json_object_from_request,
34 parse_string,
35 )
23 from synapse.http.servlet import RestServlet, parse_json_object_from_request
3624 from synapse.rest.admin._base import (
3725 assert_requester_is_admin,
38 assert_user_is_admin,
3926 historical_admin_path_patterns,
4027 )
28 from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
4129 from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo
4230 from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet
31 from synapse.rest.admin.rooms import ShutdownRoomRestServlet
4332 from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet
44 from synapse.rest.admin.users import UserAdminServlet
45 from synapse.types import UserID, create_requester
46 from synapse.util.async_helpers import maybe_awaitable
33 from synapse.rest.admin.users import (
34 AccountValidityRenewServlet,
35 DeactivateAccountRestServlet,
36 GetUsersPaginatedRestServlet,
37 ResetPasswordRestServlet,
38 SearchUsersRestServlet,
39 UserAdminServlet,
40 UserRegisterServlet,
41 UsersRestServlet,
42 WhoisRestServlet,
43 )
4744 from synapse.util.versionstring import get_version_string
4845
4946 logger = logging.getLogger(__name__)
50
51
52 class UsersRestServlet(RestServlet):
53 PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
54
55 def __init__(self, hs):
56 self.hs = hs
57 self.auth = hs.get_auth()
58 self.handlers = hs.get_handlers()
59
60 async def on_GET(self, request, user_id):
61 target_user = UserID.from_string(user_id)
62 await assert_requester_is_admin(self.auth, request)
63
64 if not self.hs.is_mine(target_user):
65 raise SynapseError(400, "Can only look up local users")
66
67 ret = await self.handlers.admin_handler.get_users()
68
69 return 200, ret
7047
7148
7249 class VersionServlet(RestServlet):
8057
8158 def on_GET(self, request):
8259 return 200, self.res
83
84
85 class UserRegisterServlet(RestServlet):
86 """
87 Attributes:
88 NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
89 nonces (dict[str, int]): The nonces that we will accept. A dict of
90 nonce to the time it was generated, in int seconds.
91 """
92
93 PATTERNS = historical_admin_path_patterns("/register")
94 NONCE_TIMEOUT = 60
95
96 def __init__(self, hs):
97 self.handlers = hs.get_handlers()
98 self.reactor = hs.get_reactor()
99 self.nonces = {}
100 self.hs = hs
101
102 def _clear_old_nonces(self):
103 """
104 Clear out old nonces that are older than NONCE_TIMEOUT.
105 """
106 now = int(self.reactor.seconds())
107
108 for k, v in list(self.nonces.items()):
109 if now - v > self.NONCE_TIMEOUT:
110 del self.nonces[k]
111
112 def on_GET(self, request):
113 """
114 Generate a new nonce.
115 """
116 self._clear_old_nonces()
117
118 nonce = self.hs.get_secrets().token_hex(64)
119 self.nonces[nonce] = int(self.reactor.seconds())
120 return 200, {"nonce": nonce}
121
122 async def on_POST(self, request):
123 self._clear_old_nonces()
124
125 if not self.hs.config.registration_shared_secret:
126 raise SynapseError(400, "Shared secret registration is not enabled")
127
128 body = parse_json_object_from_request(request)
129
130 if "nonce" not in body:
131 raise SynapseError(400, "nonce must be specified", errcode=Codes.BAD_JSON)
132
133 nonce = body["nonce"]
134
135 if nonce not in self.nonces:
136 raise SynapseError(400, "unrecognised nonce")
137
138 # Delete the nonce, so it can't be reused, even if it's invalid
139 del self.nonces[nonce]
140
141 if "username" not in body:
142 raise SynapseError(
143 400, "username must be specified", errcode=Codes.BAD_JSON
144 )
145 else:
146 if (
147 not isinstance(body["username"], text_type)
148 or len(body["username"]) > 512
149 ):
150 raise SynapseError(400, "Invalid username")
151
152 username = body["username"].encode("utf-8")
153 if b"\x00" in username:
154 raise SynapseError(400, "Invalid username")
155
156 if "password" not in body:
157 raise SynapseError(
158 400, "password must be specified", errcode=Codes.BAD_JSON
159 )
160 else:
161 if (
162 not isinstance(body["password"], text_type)
163 or len(body["password"]) > 512
164 ):
165 raise SynapseError(400, "Invalid password")
166
167 password = body["password"].encode("utf-8")
168 if b"\x00" in password:
169 raise SynapseError(400, "Invalid password")
170
171 admin = body.get("admin", None)
172 user_type = body.get("user_type", None)
173
174 if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
175 raise SynapseError(400, "Invalid user type")
176
177 got_mac = body["mac"]
178
179 want_mac = hmac.new(
180 key=self.hs.config.registration_shared_secret.encode(),
181 digestmod=hashlib.sha1,
182 )
183 want_mac.update(nonce.encode("utf8"))
184 want_mac.update(b"\x00")
185 want_mac.update(username)
186 want_mac.update(b"\x00")
187 want_mac.update(password)
188 want_mac.update(b"\x00")
189 want_mac.update(b"admin" if admin else b"notadmin")
190 if user_type:
191 want_mac.update(b"\x00")
192 want_mac.update(user_type.encode("utf8"))
193 want_mac = want_mac.hexdigest()
194
195 if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")):
196 raise SynapseError(403, "HMAC incorrect")
197
198 # Reuse the parts of RegisterRestServlet to reduce code duplication
199 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
200
201 register = RegisterRestServlet(self.hs)
202
203 user_id = await register.registration_handler.register_user(
204 localpart=body["username"].lower(),
205 password=body["password"],
206 admin=bool(admin),
207 user_type=user_type,
208 )
209
210 result = await register._create_registration_details(user_id, body)
211 return 200, result
212
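
The MAC layout is fully determined by want_mac above: a SHA-1 HMAC over the NUL-separated nonce, username, password, admin flag and optional user type. A client-side counterpart, as a sketch (the function name is made up):

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, username, password,
                         admin=False, user_type=None):
        # Mirrors want_mac above: NUL-separated fields, SHA-1, hex digest.
        mac = hmac.new(key=shared_secret.encode(), digestmod=hashlib.sha1)
        mac.update(nonce.encode("utf8"))
        mac.update(b"\x00")
        mac.update(username.encode("utf8"))
        mac.update(b"\x00")
        mac.update(password.encode("utf8"))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        if user_type:
            mac.update(b"\x00")
            mac.update(user_type.encode("utf8"))
        return mac.hexdigest()
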
213
214 class WhoisRestServlet(RestServlet):
215 PATTERNS = historical_admin_path_patterns("/whois/(?P<user_id>[^/]*)")
216
217 def __init__(self, hs):
218 self.hs = hs
219 self.auth = hs.get_auth()
220 self.handlers = hs.get_handlers()
221
222 async def on_GET(self, request, user_id):
223 target_user = UserID.from_string(user_id)
224 requester = await self.auth.get_user_by_req(request)
225 auth_user = requester.user
226
227 if target_user != auth_user:
228 await assert_user_is_admin(self.auth, auth_user)
229
230 if not self.hs.is_mine(target_user):
231 raise SynapseError(400, "Can only whois a local user")
232
233 ret = await self.handlers.admin_handler.get_whois(target_user)
234
235 return 200, ret
23660
23761
23862 class PurgeHistoryRestServlet(RestServlet):
285109 room_id, stream_ordering
286110 )
287111 if not r:
288 logger.warn(
112 logger.warning(
289113 "[purge] purging events not possible: No event found "
290114 "(received_ts %i => stream_ordering %i)",
291115 ts,
341165 return 200, purge_status.asdict()
342166
343167
344 class DeactivateAccountRestServlet(RestServlet):
345 PATTERNS = historical_admin_path_patterns("/deactivate/(?P<target_user_id>[^/]*)")
346
347 def __init__(self, hs):
348 self._deactivate_account_handler = hs.get_deactivate_account_handler()
349 self.auth = hs.get_auth()
350
351 async def on_POST(self, request, target_user_id):
352 await assert_requester_is_admin(self.auth, request)
353 body = parse_json_object_from_request(request, allow_empty_body=True)
354 erase = body.get("erase", False)
355 if not isinstance(erase, bool):
356 raise SynapseError(
357 http_client.BAD_REQUEST,
358 "Param 'erase' must be a boolean, if given",
359 Codes.BAD_JSON,
360 )
361
362 UserID.from_string(target_user_id)
363
364 result = await self._deactivate_account_handler.deactivate_account(
365 target_user_id, erase
366 )
367 if result:
368 id_server_unbind_result = "success"
369 else:
370 id_server_unbind_result = "no-support"
371
372 return 200, {"id_server_unbind_result": id_server_unbind_result}
373
374
375 class ShutdownRoomRestServlet(RestServlet):
376 """Shuts down a room by removing all local users from the room and blocking
377 all future invites and joins to the room. Any local aliases will be repointed
378 to a new room created by `new_room_user_id` and kicked users will be auto
379 joined to the new room.
380 """
381
382 PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P<room_id>[^/]+)")
383
384 DEFAULT_MESSAGE = (
385 "Sharing illegal content on this server is not permitted and rooms in"
386 " violation will be blocked."
387 )
388
389 def __init__(self, hs):
390 self.hs = hs
391 self.store = hs.get_datastore()
392 self.state = hs.get_state_handler()
393 self._room_creation_handler = hs.get_room_creation_handler()
394 self.event_creation_handler = hs.get_event_creation_handler()
395 self.room_member_handler = hs.get_room_member_handler()
396 self.auth = hs.get_auth()
397
398 async def on_POST(self, request, room_id):
399 requester = await self.auth.get_user_by_req(request)
400 await assert_user_is_admin(self.auth, requester.user)
401
402 content = parse_json_object_from_request(request)
403 assert_params_in_dict(content, ["new_room_user_id"])
404 new_room_user_id = content["new_room_user_id"]
405
406 room_creator_requester = create_requester(new_room_user_id)
407
408 message = content.get("message", self.DEFAULT_MESSAGE)
409 room_name = content.get("room_name", "Content Violation Notification")
410
411 info = await self._room_creation_handler.create_room(
412 room_creator_requester,
413 config={
414 "preset": "public_chat",
415 "name": room_name,
416 "power_level_content_override": {"users_default": -10},
417 },
418 ratelimit=False,
419 )
420 new_room_id = info["room_id"]
421
422 requester_user_id = requester.user.to_string()
423
424 logger.info(
425 "Shutting down room %r, joining to new room: %r", room_id, new_room_id
426 )
427
428 # This will work even if the room is already blocked, but that is
429 # desirable in case the first attempt at blocking the room failed below.
430 await self.store.block_room(room_id, requester_user_id)
431
432 users = await self.state.get_current_users_in_room(room_id)
433 kicked_users = []
434 failed_to_kick_users = []
435 for user_id in users:
436 if not self.hs.is_mine_id(user_id):
437 continue
438
439 logger.info("Kicking %r from %r...", user_id, room_id)
440
441 try:
442 target_requester = create_requester(user_id)
443 await self.room_member_handler.update_membership(
444 requester=target_requester,
445 target=target_requester.user,
446 room_id=room_id,
447 action=Membership.LEAVE,
448 content={},
449 ratelimit=False,
450 require_consent=False,
451 )
452
453 await self.room_member_handler.forget(target_requester.user, room_id)
454
455 await self.room_member_handler.update_membership(
456 requester=target_requester,
457 target=target_requester.user,
458 room_id=new_room_id,
459 action=Membership.JOIN,
460 content={},
461 ratelimit=False,
462 require_consent=False,
463 )
464
465 kicked_users.append(user_id)
466 except Exception:
467 logger.exception(
468 "Failed to leave old room and join new room for %r", user_id
469 )
470 failed_to_kick_users.append(user_id)
471
472 await self.event_creation_handler.create_and_send_nonmember_event(
473 room_creator_requester,
474 {
475 "type": "m.room.message",
476 "content": {"body": message, "msgtype": "m.text"},
477 "room_id": new_room_id,
478 "sender": new_room_user_id,
479 },
480 ratelimit=False,
481 )
482
483 aliases_for_room = await maybe_awaitable(
484 self.store.get_aliases_for_room(room_id)
485 )
486
487 await self.store.update_aliases_for_room(
488 room_id, new_room_id, requester_user_id
489 )
490
491 return (
492 200,
493 {
494 "kicked_users": kicked_users,
495 "failed_to_kick_users": failed_to_kick_users,
496 "local_aliases": aliases_for_room,
497 "new_room_id": new_room_id,
498 },
499 )
500
501
502 class ResetPasswordRestServlet(RestServlet):
503 """Post request to allow an administrator reset password for a user.
504 This needs user to have administrator access in Synapse.
505 Example:
506 http://localhost:8008/_synapse/admin/v1/reset_password/
507 @user:to_reset_password?access_token=admin_access_token
508 JsonBodyToSend:
509 {
510 "new_password": "secret"
511 }
512 Returns:
513 200 OK with empty object if success otherwise an error.
514 """
515
516 PATTERNS = historical_admin_path_patterns(
517 "/reset_password/(?P<target_user_id>[^/]*)"
518 )
519
520 def __init__(self, hs):
521 self.store = hs.get_datastore()
522 self.hs = hs
523 self.auth = hs.get_auth()
524 self._set_password_handler = hs.get_set_password_handler()
525
526 async def on_POST(self, request, target_user_id):
527 """Post request to allow an administrator reset password for a user.
528 This needs user to have administrator access in Synapse.
529 """
530 requester = await self.auth.get_user_by_req(request)
531 await assert_user_is_admin(self.auth, requester.user)
532
533 UserID.from_string(target_user_id)
534
535 params = parse_json_object_from_request(request)
536 assert_params_in_dict(params, ["new_password"])
537 new_password = params["new_password"]
538
539 await self._set_password_handler.set_password(
540 target_user_id, new_password, requester
541 )
542 return 200, {}
543
544
545 class GetUsersPaginatedRestServlet(RestServlet):
546 """Get request to get specific number of users from Synapse.
547 This needs user to have administrator access in Synapse.
548 Example:
549 http://localhost:8008/_synapse/admin/v1/users_paginate/
550 @admin:user?access_token=admin_access_token&start=0&limit=10
551 Returns:
552 200 OK with json object {list[dict[str, Any]], count} or empty object.
553 """
554
555 PATTERNS = historical_admin_path_patterns(
556 "/users_paginate/(?P<target_user_id>[^/]*)"
557 )
558
559 def __init__(self, hs):
560 self.store = hs.get_datastore()
561 self.hs = hs
562 self.auth = hs.get_auth()
563 self.handlers = hs.get_handlers()
564
565 async def on_GET(self, request, target_user_id):
566 """Get request to get specific number of users from Synapse.
567 This needs user to have administrator access in Synapse.
568 """
569 await assert_requester_is_admin(self.auth, request)
570
571 target_user = UserID.from_string(target_user_id)
572
573 if not self.hs.is_mine(target_user):
574 raise SynapseError(400, "Can only look up local users")
575
576 order = "name" # order by name in user table
577 start = parse_integer(request, "start", required=True)
578 limit = parse_integer(request, "limit", required=True)
579
580 logger.info("limit: %s, start: %s", limit, start)
581
582 ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
583 return 200, ret
584
585 async def on_POST(self, request, target_user_id):
586 """Post request to fetch a specific number of users from Synapse.
587 This requires the requester to have administrator access in Synapse.
588 Example:
589 http://localhost:8008/_synapse/admin/v1/users_paginate/
590 @admin:user?access_token=admin_access_token
591 JsonBodyToSend:
592 {
593 "start": "0",
594 "limit": "10"
595 }
596 Returns:
597 200 OK with a JSON object containing a list of users and a count, or an empty object.
598 """
599 await assert_requester_is_admin(self.auth, request)
600 UserID.from_string(target_user_id)
601
602 order = "name" # order by name in user table
603 params = parse_json_object_from_request(request)
604 assert_params_in_dict(params, ["limit", "start"])
605 limit = params["limit"]
606 start = params["start"]
607 logger.info("limit: %s, start: %s", limit, start)
608
609 ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
610 return 200, ret
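# Editor's note: an illustrative call (not part of this changeset) against the
# GET form of users_paginate above; `start` and `limit` are required integer
# query parameters. Host, token and the `requests` library are assumptions.
import requests

resp = requests.get(
    "http://localhost:8008/_synapse/admin/v1/users_paginate/@admin:example.com",
    params={"access_token": "ADMIN_ACCESS_TOKEN", "start": 0, "limit": 10},
)
print(resp.json())  # a JSON object with a list of users and a count, per the docstring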
611
612
613 class SearchUsersRestServlet(RestServlet):
614 """Get request to search the user table for specific users according to
615 a search term.
616 This requires the requester to have administrator access in Synapse.
617 Example:
618 http://localhost:8008/_synapse/admin/v1/search_users/
619 @admin:user?access_token=admin_access_token&term=alice
620 Returns:
621 200 OK with a JSON object containing a list of users and a count, or an empty object.
622 """
623
624 PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
625
626 def __init__(self, hs):
627 self.store = hs.get_datastore()
628 self.hs = hs
629 self.auth = hs.get_auth()
630 self.handlers = hs.get_handlers()
631
632 async def on_GET(self, request, target_user_id):
633 """Get request to search the user table for specific users according to
634 a search term.
635 This requires the requester to have administrator access in Synapse.
636 """
637 await assert_requester_is_admin(self.auth, request)
638
639 target_user = UserID.from_string(target_user_id)
640
641 # To allow all users to get the users list
642 # if not is_admin and target_user != auth_user:
643 # raise AuthError(403, "You are not a server admin")
644
645 if not self.hs.is_mine(target_user):
646 raise SynapseError(400, "Can only query local users")
647
648 term = parse_string(request, "term", required=True)
649 logger.info("term: %s ", term)
650
651 ret = await self.handlers.admin_handler.search_users(term)
652 return 200, ret
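# Editor's note: hedged usage sketch (not part of this changeset) for the
# search_users endpoint above; the `term` query parameter is required. Host,
# token and the `requests` library are assumptions.
import requests

resp = requests.get(
    "http://localhost:8008/_synapse/admin/v1/search_users/@admin:example.com",
    params={"access_token": "ADMIN_ACCESS_TOKEN", "term": "alice"},
)
print(resp.json())  # matching users and a count, per the docstring above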
653
654
655 class DeleteGroupAdminRestServlet(RestServlet):
656 """Allows deletion of local groups
657 """
658
659 PATTERNS = historical_admin_path_patterns("/delete_group/(?P<group_id>[^/]*)")
660
661 def __init__(self, hs):
662 self.group_server = hs.get_groups_server_handler()
663 self.is_mine_id = hs.is_mine_id
664 self.auth = hs.get_auth()
665
666 async def on_POST(self, request, group_id):
667 requester = await self.auth.get_user_by_req(request)
668 await assert_user_is_admin(self.auth, requester.user)
669
670 if not self.is_mine_id(group_id):
671 raise SynapseError(400, "Can only delete local groups")
672
673 await self.group_server.delete_group(group_id, requester.user.to_string())
674 return 200, {}
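# Editor's note: hedged usage sketch (not part of this changeset) for the
# delete_group endpoint above; only local groups can be deleted (see the
# is_mine_id check). The group ID, host, token and `requests` are placeholders.
import requests

resp = requests.post(
    "http://localhost:8008/_synapse/admin/v1/delete_group/+mygroup:example.com",
    params={"access_token": "ADMIN_ACCESS_TOKEN"},
    json={},
)
assert resp.status_code == 200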
675
676
677 class AccountValidityRenewServlet(RestServlet):
678 PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
679
680 def __init__(self, hs):
681 """
682 Args:
683 hs (synapse.server.HomeServer): server
684 """
685 self.hs = hs
686 self.account_activity_handler = hs.get_account_validity_handler()
687 self.auth = hs.get_auth()
688
689 async def on_POST(self, request):
690 await assert_requester_is_admin(self.auth, request)
691
692 body = parse_json_object_from_request(request)
693
694 if "user_id" not in body:
695 raise SynapseError(400, "Missing property 'user_id' in the request body")
696
697 expiration_ts = await self.account_activity_handler.renew_account_for_user(
698 body["user_id"],
699 body.get("expiration_ts"),
700 not body.get("enable_renewal_emails", True),
701 )
702
703 res = {"expiration_ts": expiration_ts}
704 return 200, res
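# Editor's note: hedged usage sketch (not part of this changeset) for the
# account_validity renewal endpoint above. "expiration_ts" is optional and
# "enable_renewal_emails" defaults to True, per on_POST. Host, token and
# `requests` are assumptions.
import requests

resp = requests.post(
    "http://localhost:8008/_synapse/admin/v1/account_validity/validity",
    params={"access_token": "ADMIN_ACCESS_TOKEN"},
    json={"user_id": "@user:example.com", "enable_renewal_emails": False},
)
print(resp.json())  # {"expiration_ts": ...}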
705
706
707168 ########################################################################################
708169 #
709170 # please don't add more servlets here: this file is already long and unwieldy. Put
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15
16 from synapse.api.errors import SynapseError
17 from synapse.http.servlet import RestServlet
18 from synapse.rest.admin._base import (
19 assert_user_is_admin,
20 historical_admin_path_patterns,
21 )
22
23 logger = logging.getLogger(__name__)
24
25
26 class DeleteGroupAdminRestServlet(RestServlet):
27 """Allows deletion of local groups
28 """
29
30 PATTERNS = historical_admin_path_patterns("/delete_group/(?P<group_id>[^/]*)")
31
32 def __init__(self, hs):
33 self.group_server = hs.get_groups_server_handler()
34 self.is_mine_id = hs.is_mine_id
35 self.auth = hs.get_auth()
36
37 async def on_POST(self, request, group_id):
38 requester = await self.auth.get_user_by_req(request)
39 await assert_user_is_admin(self.auth, requester.user)
40
41 if not self.is_mine_id(group_id):
42 raise SynapseError(400, "Can only delete local groups")
43
44 await self.group_server.delete_group(group_id, requester.user.to_string())
45 return 200, {}
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15
16 from synapse.api.constants import Membership
17 from synapse.http.servlet import (
18 RestServlet,
19 assert_params_in_dict,
20 parse_json_object_from_request,
21 )
22 from synapse.rest.admin._base import (
23 assert_user_is_admin,
24 historical_admin_path_patterns,
25 )
26 from synapse.types import create_requester
27 from synapse.util.async_helpers import maybe_awaitable
28
29 logger = logging.getLogger(__name__)
30
31
32 class ShutdownRoomRestServlet(RestServlet):
33 """Shuts down a room by removing all local users from the room and blocking
34 all future invites and joins to the room. Any local aliases will be repointed
35 to a new room created by `new_room_user_id` and kicked users will be auto
36 joined to the new room.
37 """
38
39 PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P<room_id>[^/]+)")
40
41 DEFAULT_MESSAGE = (
42 "Sharing illegal content on this server is not permitted and rooms in"
43 " violation will be blocked."
44 )
45
46 def __init__(self, hs):
47 self.hs = hs
48 self.store = hs.get_datastore()
49 self.state = hs.get_state_handler()
50 self._room_creation_handler = hs.get_room_creation_handler()
51 self.event_creation_handler = hs.get_event_creation_handler()
52 self.room_member_handler = hs.get_room_member_handler()
53 self.auth = hs.get_auth()
54
55 async def on_POST(self, request, room_id):
56 requester = await self.auth.get_user_by_req(request)
57 await assert_user_is_admin(self.auth, requester.user)
58
59 content = parse_json_object_from_request(request)
60 assert_params_in_dict(content, ["new_room_user_id"])
61 new_room_user_id = content["new_room_user_id"]
62
63 room_creator_requester = create_requester(new_room_user_id)
64
65 message = content.get("message", self.DEFAULT_MESSAGE)
66 room_name = content.get("room_name", "Content Violation Notification")
67
68 info = await self._room_creation_handler.create_room(
69 room_creator_requester,
70 config={
71 "preset": "public_chat",
72 "name": room_name,
73 "power_level_content_override": {"users_default": -10},
74 },
75 ratelimit=False,
76 )
77 new_room_id = info["room_id"]
78
79 requester_user_id = requester.user.to_string()
80
81 logger.info(
82 "Shutting down room %r, joining to new room: %r", room_id, new_room_id
83 )
84
85 # This will work even if the room is already blocked, but that is
86 # desirable in case the first attempt at blocking the room failed below.
87 await self.store.block_room(room_id, requester_user_id)
88
89 users = await self.state.get_current_users_in_room(room_id)
90 kicked_users = []
91 failed_to_kick_users = []
92 for user_id in users:
93 if not self.hs.is_mine_id(user_id):
94 continue
95
96 logger.info("Kicking %r from %r...", user_id, room_id)
97
98 try:
99 target_requester = create_requester(user_id)
100 await self.room_member_handler.update_membership(
101 requester=target_requester,
102 target=target_requester.user,
103 room_id=room_id,
104 action=Membership.LEAVE,
105 content={},
106 ratelimit=False,
107 require_consent=False,
108 )
109
110 await self.room_member_handler.forget(target_requester.user, room_id)
111
112 await self.room_member_handler.update_membership(
113 requester=target_requester,
114 target=target_requester.user,
115 room_id=new_room_id,
116 action=Membership.JOIN,
117 content={},
118 ratelimit=False,
119 require_consent=False,
120 )
121
122 kicked_users.append(user_id)
123 except Exception:
124 logger.exception(
125 "Failed to leave old room and join new room for %r", user_id
126 )
127 failed_to_kick_users.append(user_id)
128
129 await self.event_creation_handler.create_and_send_nonmember_event(
130 room_creator_requester,
131 {
132 "type": "m.room.message",
133 "content": {"body": message, "msgtype": "m.text"},
134 "room_id": new_room_id,
135 "sender": new_room_user_id,
136 },
137 ratelimit=False,
138 )
139
140 aliases_for_room = await maybe_awaitable(
141 self.store.get_aliases_for_room(room_id)
142 )
143
144 await self.store.update_aliases_for_room(
145 room_id, new_room_id, requester_user_id
146 )
147
148 return (
149 200,
150 {
151 "kicked_users": kicked_users,
152 "failed_to_kick_users": failed_to_kick_users,
153 "local_aliases": aliases_for_room,
154 "new_room_id": new_room_id,
155 },
156 )
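# Editor's note: hedged usage sketch (not part of this changeset) for the
# shutdown_room endpoint above. Only "new_room_user_id" is required;
# "room_name" and "message" fall back to the defaults in on_POST. The room ID,
# host, token and the `requests` library are placeholders.
import requests

resp = requests.post(
    "http://localhost:8008/_synapse/admin/v1/shutdown_room/!abcdef:example.com",
    params={"access_token": "ADMIN_ACCESS_TOKEN"},
    json={"new_room_user_id": "@admin:example.com"},
)
# The response mirrors the return value above: kicked_users,
# failed_to_kick_users, local_aliases and new_room_id.
print(resp.json())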
1111 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
14 import hashlib
15 import hmac
16 import logging
1417 import re
1518
16 from synapse.api.errors import SynapseError
19 from six import text_type
20 from six.moves import http_client
21
22 from synapse.api.constants import UserTypes
23 from synapse.api.errors import Codes, SynapseError
1724 from synapse.http.servlet import (
1825 RestServlet,
1926 assert_params_in_dict,
27 parse_integer,
2028 parse_json_object_from_request,
29 parse_string,
2130 )
22 from synapse.rest.admin import assert_requester_is_admin, assert_user_is_admin
31 from synapse.rest.admin._base import (
32 assert_requester_is_admin,
33 assert_user_is_admin,
34 historical_admin_path_patterns,
35 )
2336 from synapse.types import UserID
37
38 logger = logging.getLogger(__name__)
39
40
41 class UsersRestServlet(RestServlet):
42 PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
43
44 def __init__(self, hs):
45 self.hs = hs
46 self.auth = hs.get_auth()
47 self.admin_handler = hs.get_handlers().admin_handler
48
49 async def on_GET(self, request, user_id):
50 target_user = UserID.from_string(user_id)
51 await assert_requester_is_admin(self.auth, request)
52
53 if not self.hs.is_mine(target_user):
54 raise SynapseError(400, "Can only query local users")
55
56 ret = await self.admin_handler.get_users()
57
58 return 200, ret
59
60
61 class GetUsersPaginatedRestServlet(RestServlet):
62 """Get request to fetch a specific number of users from Synapse.
63 This requires the requester to have administrator access in Synapse.
64 Example:
65 http://localhost:8008/_synapse/admin/v1/users_paginate/
66 @admin:user?access_token=admin_access_token&start=0&limit=10
67 Returns:
68 200 OK with a JSON object containing a list of users and a count, or an empty object.
69 """
70
71 PATTERNS = historical_admin_path_patterns(
72 "/users_paginate/(?P<target_user_id>[^/]*)"
73 )
74
75 def __init__(self, hs):
76 self.store = hs.get_datastore()
77 self.hs = hs
78 self.auth = hs.get_auth()
79 self.handlers = hs.get_handlers()
80
81 async def on_GET(self, request, target_user_id):
82 """Get request to fetch a specific number of users from Synapse.
83 This requires the requester to have administrator access in Synapse.
84 """
85 await assert_requester_is_admin(self.auth, request)
86
87 target_user = UserID.from_string(target_user_id)
88
89 if not self.hs.is_mine(target_user):
90 raise SynapseError(400, "Can only query local users")
91
92 order = "name" # order by name in user table
93 start = parse_integer(request, "start", required=True)
94 limit = parse_integer(request, "limit", required=True)
95
96 logger.info("limit: %s, start: %s", limit, start)
97
98 ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
99 return 200, ret
100
101 async def on_POST(self, request, target_user_id):
102 """Post request to fetch a specific number of users from Synapse.
103 This requires the requester to have administrator access in Synapse.
104 Example:
105 http://localhost:8008/_synapse/admin/v1/users_paginate/
106 @admin:user?access_token=admin_access_token
107 JsonBodyToSend:
108 {
109 "start": "0",
110 "limit": "10"
111 }
112 Returns:
113 200 OK with a JSON object containing a list of users and a count, or an empty object.
114 """
115 await assert_requester_is_admin(self.auth, request)
116 UserID.from_string(target_user_id)
117
118 order = "name" # order by name in user table
119 params = parse_json_object_from_request(request)
120 assert_params_in_dict(params, ["limit", "start"])
121 limit = params["limit"]
122 start = params["start"]
123 logger.info("limit: %s, start: %s", limit, start)
124
125 ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
126 return 200, ret
127
128
129 class UserRegisterServlet(RestServlet):
130 """
131 Attributes:
132 NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
133 nonces (dict[str, int]): The nonces that we will accept. A dict of
134 nonce to the time it was generated, in int seconds.
135 """
136
137 PATTERNS = historical_admin_path_patterns("/register")
138 NONCE_TIMEOUT = 60
139
140 def __init__(self, hs):
141 self.handlers = hs.get_handlers()
142 self.reactor = hs.get_reactor()
143 self.nonces = {}
144 self.hs = hs
145
146 def _clear_old_nonces(self):
147 """
148 Clear out old nonces that are older than NONCE_TIMEOUT.
149 """
150 now = int(self.reactor.seconds())
151
152 for k, v in list(self.nonces.items()):
153 if now - v > self.NONCE_TIMEOUT:
154 del self.nonces[k]
155
156 def on_GET(self, request):
157 """
158 Generate a new nonce.
159 """
160 self._clear_old_nonces()
161
162 nonce = self.hs.get_secrets().token_hex(64)
163 self.nonces[nonce] = int(self.reactor.seconds())
164 return 200, {"nonce": nonce}
165
166 async def on_POST(self, request):
167 self._clear_old_nonces()
168
169 if not self.hs.config.registration_shared_secret:
170 raise SynapseError(400, "Shared secret registration is not enabled")
171
172 body = parse_json_object_from_request(request)
173
174 if "nonce" not in body:
175 raise SynapseError(400, "nonce must be specified", errcode=Codes.BAD_JSON)
176
177 nonce = body["nonce"]
178
179 if nonce not in self.nonces:
180 raise SynapseError(400, "unrecognised nonce")
181
182 # Delete the nonce, so it can't be reused, even if it's invalid
183 del self.nonces[nonce]
184
185 if "username" not in body:
186 raise SynapseError(
187 400, "username must be specified", errcode=Codes.BAD_JSON
188 )
189 else:
190 if (
191 not isinstance(body["username"], text_type)
192 or len(body["username"]) > 512
193 ):
194 raise SynapseError(400, "Invalid username")
195
196 username = body["username"].encode("utf-8")
197 if b"\x00" in username:
198 raise SynapseError(400, "Invalid username")
199
200 if "password" not in body:
201 raise SynapseError(
202 400, "password must be specified", errcode=Codes.BAD_JSON
203 )
204 else:
205 if (
206 not isinstance(body["password"], text_type)
207 or len(body["password"]) > 512
208 ):
209 raise SynapseError(400, "Invalid password")
210
211 password = body["password"].encode("utf-8")
212 if b"\x00" in password:
213 raise SynapseError(400, "Invalid password")
214
215 admin = body.get("admin", None)
216 user_type = body.get("user_type", None)
217
218 if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
219 raise SynapseError(400, "Invalid user type")
220
221 got_mac = body["mac"]
222
223 want_mac = hmac.new(
224 key=self.hs.config.registration_shared_secret.encode(),
225 digestmod=hashlib.sha1,
226 )
227 want_mac.update(nonce.encode("utf8"))
228 want_mac.update(b"\x00")
229 want_mac.update(username)
230 want_mac.update(b"\x00")
231 want_mac.update(password)
232 want_mac.update(b"\x00")
233 want_mac.update(b"admin" if admin else b"notadmin")
234 if user_type:
235 want_mac.update(b"\x00")
236 want_mac.update(user_type.encode("utf8"))
237 want_mac = want_mac.hexdigest()
238
239 if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")):
240 raise SynapseError(403, "HMAC incorrect")
241
242 # Reuse the parts of RegisterRestServlet to reduce code duplication
243 from synapse.rest.client.v2_alpha.register import RegisterRestServlet
244
245 register = RegisterRestServlet(self.hs)
246
247 user_id = await register.registration_handler.register_user(
248 localpart=body["username"].lower(),
249 password=body["password"],
250 admin=bool(admin),
251 user_type=user_type,
252 )
253
254 result = await register._create_registration_details(user_id, body)
255 return 200, result
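# Editor's note: a minimal client-side sketch (not part of this changeset) of
# the shared-secret registration flow implemented by UserRegisterServlet
# above. The MAC layout (nonce \0 username \0 password \0 admin-flag
# [\0 user_type], HMAC-SHA1 keyed with the shared secret) mirrors the
# server-side `want_mac` computation. The base URL, shared secret, credentials
# and the third-party `requests` library are illustrative assumptions.
import hashlib
import hmac

import requests

BASE = "http://localhost:8008/_synapse/admin/v1/register"
SHARED_SECRET = b"the-registration-shared-secret"  # placeholder

# Step 1: fetch a single-use nonce (accepted for NONCE_TIMEOUT seconds).
nonce = requests.get(BASE).json()["nonce"]

# Step 2: compute the MAC exactly as the servlet recomputes it.
mac = hmac.new(key=SHARED_SECRET, digestmod=hashlib.sha1)
mac.update(nonce.encode("utf8"))
mac.update(b"\x00")
mac.update(b"alice")       # username
mac.update(b"\x00")
mac.update(b"wonderland")  # password
mac.update(b"\x00")
mac.update(b"notadmin")    # or b"admin" for an admin account

# Step 3: register; note the nonce is deleted server-side even on failure.
resp = requests.post(
    BASE,
    json={
        "nonce": nonce,
        "username": "alice",
        "password": "wonderland",
        "mac": mac.hexdigest(),
    },
)
print(resp.json())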
256
257
258 class WhoisRestServlet(RestServlet):
259 PATTERNS = historical_admin_path_patterns("/whois/(?P<user_id>[^/]*)")
260
261 def __init__(self, hs):
262 self.hs = hs
263 self.auth = hs.get_auth()
264 self.handlers = hs.get_handlers()
265
266 async def on_GET(self, request, user_id):
267 target_user = UserID.from_string(user_id)
268 requester = await self.auth.get_user_by_req(request)
269 auth_user = requester.user
270
271 if target_user != auth_user:
272 await assert_user_is_admin(self.auth, auth_user)
273
274 if not self.hs.is_mine(target_user):
275 raise SynapseError(400, "Can only whois a local user")
276
277 ret = await self.handlers.admin_handler.get_whois(target_user)
278
279 return 200, ret
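# Editor's note: hedged usage sketch (not part of this changeset) for the
# whois endpoint above; non-admins may only whois themselves (see the
# target_user != auth_user check). Host, token and `requests` are assumptions.
import requests

resp = requests.get(
    "http://localhost:8008/_synapse/admin/v1/whois/@user:example.com",
    params={"access_token": "ADMIN_ACCESS_TOKEN"},
)
print(resp.json())  # session information for the target user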
280
281
282 class DeactivateAccountRestServlet(RestServlet):
283 PATTERNS = historical_admin_path_patterns("/deactivate/(?P<target_user_id>[^/]*)")
284
285 def __init__(self, hs):
286 self._deactivate_account_handler = hs.get_deactivate_account_handler()
287 self.auth = hs.get_auth()
288
289 async def on_POST(self, request, target_user_id):
290 await assert_requester_is_admin(self.auth, request)
291 body = parse_json_object_from_request(request, allow_empty_body=True)
292 erase = body.get("erase", False)
293 if not isinstance(erase, bool):
294 raise SynapseError(
295 http_client.BAD_REQUEST,
296 "Param 'erase' must be a boolean, if given",
297 Codes.BAD_JSON,
298 )
299
300 UserID.from_string(target_user_id)
301
302 result = await self._deactivate_account_handler.deactivate_account(
303 target_user_id, erase
304 )
305 if result:
306 id_server_unbind_result = "success"
307 else:
308 id_server_unbind_result = "no-support"
309
310 return 200, {"id_server_unbind_result": id_server_unbind_result}
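# Editor's note: hedged usage sketch (not part of this changeset) for the
# deactivate endpoint above; the optional boolean "erase" requests erasure of
# the user's data. Host, token and `requests` are assumptions.
import requests

resp = requests.post(
    "http://localhost:8008/_synapse/admin/v1/deactivate/@user:example.com",
    params={"access_token": "ADMIN_ACCESS_TOKEN"},
    json={"erase": True},
)
print(resp.json())  # {"id_server_unbind_result": "success" | "no-support"}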
311
312
313 class AccountValidityRenewServlet(RestServlet):
314 PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
315
316 def __init__(self, hs):
317 """
318 Args:
319 hs (synapse.server.HomeServer): server
320 """
321 self.hs = hs
322 self.account_activity_handler = hs.get_account_validity_handler()
323 self.auth = hs.get_auth()
324
325 async def on_POST(self, request):
326 await assert_requester_is_admin(self.auth, request)
327
328 body = parse_json_object_from_request(request)
329
330 if "user_id" not in body:
331 raise SynapseError(400, "Missing property 'user_id' in the request body")
332
333 expiration_ts = await self.account_activity_handler.renew_account_for_user(
334 body["user_id"],
335 body.get("expiration_ts"),
336 not body.get("enable_renewal_emails", True),
337 )
338
339 res = {"expiration_ts": expiration_ts}
340 return 200, res
341
342
343 class ResetPasswordRestServlet(RestServlet):
344 """Post request to allow an administrator to reset the password for a user.
345 This requires the requester to have administrator access in Synapse.
346 Example:
347 http://localhost:8008/_synapse/admin/v1/reset_password/
348 @user:to_reset_password?access_token=admin_access_token
349 JsonBodyToSend:
350 {
351 "new_password": "secret"
352 }
353 Returns:
354 200 OK with an empty object on success, otherwise an error.
355 """
356
357 PATTERNS = historical_admin_path_patterns(
358 "/reset_password/(?P<target_user_id>[^/]*)"
359 )
360
361 def __init__(self, hs):
362 self.store = hs.get_datastore()
363 self.hs = hs
364 self.auth = hs.get_auth()
365 self._set_password_handler = hs.get_set_password_handler()
366
367 async def on_POST(self, request, target_user_id):
368 """Post request to allow an administrator to reset the password for a user.
369 This requires the requester to have administrator access in Synapse.
370 """
371 requester = await self.auth.get_user_by_req(request)
372 await assert_user_is_admin(self.auth, requester.user)
373
374 UserID.from_string(target_user_id)
375
376 params = parse_json_object_from_request(request)
377 assert_params_in_dict(params, ["new_password"])
378 new_password = params["new_password"]
379
380 await self._set_password_handler.set_password(
381 target_user_id, new_password, requester
382 )
383 return 200, {}
384
385
386 class SearchUsersRestServlet(RestServlet):
387 """Get request to search the user table for specific users according to
388 a search term.
389 This requires the requester to have administrator access in Synapse.
390 Example:
391 http://localhost:8008/_synapse/admin/v1/search_users/
392 @admin:user?access_token=admin_access_token&term=alice
393 Returns:
394 200 OK with a JSON object containing a list of users and a count, or an empty object.
395 """
396
397 PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
398
399 def __init__(self, hs):
400 self.store = hs.get_datastore()
401 self.hs = hs
402 self.auth = hs.get_auth()
403 self.handlers = hs.get_handlers()
404
405 async def on_GET(self, request, target_user_id):
406 """Get request to search the user table for specific users according to
407 a search term.
408 This requires the requester to have administrator access in Synapse.
409 """
410 await assert_requester_is_admin(self.auth, request)
411
412 target_user = UserID.from_string(target_user_id)
413
414 # To allow all users to get the users list
415 # if not is_admin and target_user != auth_user:
416 # raise AuthError(403, "You are not a server admin")
417
418 if not self.hs.is_mine(target_user):
419 raise SynapseError(400, "Can only query local users")
420
421 term = parse_string(request, "term", required=True)
422 logger.info("term: %s ", term)
423
424 ret = await self.handlers.admin_handler.search_users(term)
425 return 200, ret
24426
25427
26428 class UserAdminServlet(RestServlet):
9191 self.auth_handler = self.hs.get_auth_handler()
9292 self.registration_handler = hs.get_registration_handler()
9393 self.handlers = hs.get_handlers()
94 self._clock = hs.get_clock()
9495 self._well_known_builder = WellKnownBuilder(hs)
9596 self._address_ratelimiter = Ratelimiter()
97 self._account_ratelimiter = Ratelimiter()
98 self._failed_attempts_ratelimiter = Ratelimiter()
9699
97100 def on_GET(self, request):
98101 flows = []
201204 # (See add_threepid in synapse/handlers/auth.py)
202205 address = address.lower()
203206
207 # We also apply account rate limiting using the 3PID as a key, as
208 # otherwise using 3PID bypasses the ratelimiting based on user ID.
209 self._failed_attempts_ratelimiter.ratelimit(
210 (medium, address),
211 time_now_s=self._clock.time(),
212 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
213 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
214 update=False,
215 )
216
204217 # Check for login providers that support 3pid login types
205 canonical_user_id, callback_3pid = (
206 yield self.auth_handler.check_password_provider_3pid(
207 medium, address, login_submission["password"]
208 )
218 (
219 canonical_user_id,
220 callback_3pid,
221 ) = yield self.auth_handler.check_password_provider_3pid(
222 medium, address, login_submission["password"]
209223 )
210224 if canonical_user_id:
211225 # Authentication through password provider and 3pid succeeded
212 result = yield self._register_device_with_callback(
226
227 result = yield self._complete_login(
213228 canonical_user_id, login_submission, callback_3pid
214229 )
215230 return result
220235 medium, address
221236 )
222237 if not user_id:
223 logger.warn(
238 logger.warning(
224239 "unknown 3pid identifier medium %s, address %r", medium, address
240 )
241 # We mark that we've failed to log in here, as
242 # `check_password_provider_3pid` might have returned `None` due
243 # to an incorrect password, rather than the account not
244 # existing.
245 #
246 # If it returned None but the 3PID was bound then we won't hit
247 # this code path, which is fine as then the per-user ratelimit
248 # will kick in below.
249 self._failed_attempts_ratelimiter.can_do_action(
250 (medium, address),
251 time_now_s=self._clock.time(),
252 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
253 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
254 update=True,
225255 )
226256 raise LoginError(403, "", errcode=Codes.FORBIDDEN)
227257
234264 if "user" not in identifier:
235265 raise SynapseError(400, "User identifier is missing 'user' key")
236266
237 canonical_user_id, callback = yield self.auth_handler.validate_login(
238 identifier["user"], login_submission
239 )
240
241 result = yield self._register_device_with_callback(
267 if identifier["user"].startswith("@"):
268 qualified_user_id = identifier["user"]
269 else:
270 qualified_user_id = UserID(identifier["user"], self.hs.hostname).to_string()
271
272 # Check if we've hit the failed ratelimit (but don't update it)
273 self._failed_attempts_ratelimiter.ratelimit(
274 qualified_user_id.lower(),
275 time_now_s=self._clock.time(),
276 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
277 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
278 update=False,
279 )
280
281 try:
282 canonical_user_id, callback = yield self.auth_handler.validate_login(
283 identifier["user"], login_submission
284 )
285 except LoginError:
286 # The user has failed to log in, so we need to update the rate
287 # limiter. Using `can_do_action` avoids us raising a ratelimit
288 # exception and masking the LoginError. The actual ratelimiting
289 # should have happened above.
290 self._failed_attempts_ratelimiter.can_do_action(
291 qualified_user_id.lower(),
292 time_now_s=self._clock.time(),
293 rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
294 burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
295 update=True,
296 )
297 raise
298
299 result = yield self._complete_login(
242300 canonical_user_id, login_submission, callback
243301 )
244302 return result
245303
246304 @defer.inlineCallbacks
247 def _register_device_with_callback(self, user_id, login_submission, callback=None):
248 """ Registers a device with a given user_id. Optionally run a callback
249 function after registration has completed.
305 def _complete_login(
306 self, user_id, login_submission, callback=None, create_non_existant_users=False
307 ):
308 """Called when we've successfully authed the user and now need to
309 actually log them in (e.g. create devices). This gets called on
310 all successful logins.
311
312 Applies the ratelimiting for successful login attempts against an
313 account.
250314
251315 Args:
252316 user_id (str): ID of the user to register.
253317 login_submission (dict): Dictionary of login information.
254318 callback (func|None): Callback function to run after registration.
319 create_non_existant_users (bool): Whether to create the user if
320 they don't exist. Defaults to False.
255321
256322 Returns:
257323 result (Dict[str,str]): Dictionary of account information after
258324 successful registration.
259325 """
326
327 # Before we actually log them in we check if they've already logged in
328 # too often. This happens here rather than before as we don't
329 # necessarily know the user before now.
330 self._account_ratelimiter.ratelimit(
331 user_id.lower(),
332 time_now_s=self._clock.time(),
333 rate_hz=self.hs.config.rc_login_account.per_second,
334 burst_count=self.hs.config.rc_login_account.burst_count,
335 update=True,
336 )
337
338 if create_non_existant_users:
339 # check_user_exists returns the canonical user ID if the user
340 # exists, or None; use a separate variable so a None result
341 # doesn't clobber user_id before we register below.
342 canonical_uid = yield self.auth_handler.check_user_exists(user_id)
343 if not canonical_uid:
344 canonical_uid = yield self.registration_handler.register_user(
345 localpart=UserID.from_string(user_id).localpart
346 )
347 user_id = canonical_uid
344
260345 device_id = login_submission.get("device_id")
261346 initial_display_name = login_submission.get("initial_device_display_name")
262347 device_id, access_token = yield self.registration_handler.register_device(
279364 def do_token_login(self, login_submission):
280365 token = login_submission["token"]
281366 auth_handler = self.auth_handler
282 user_id = (
283 yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
284 )
285
286 result = yield self._register_device_with_callback(user_id, login_submission)
367 user_id = yield auth_handler.validate_short_term_login_token_and_get_user_id(
368 token
369 )
370
371 result = yield self._complete_login(user_id, login_submission)
287372 return result
288373
289374 @defer.inlineCallbacks
311396 raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED)
312397
313398 user_id = UserID(user, self.hs.hostname).to_string()
314
315 registered_user_id = yield self.auth_handler.check_user_exists(user_id)
316 if not registered_user_id:
317 registered_user_id = yield self.registration_handler.register_user(
318 localpart=user
319 )
320
321 result = yield self._register_device_with_callback(
322 registered_user_id, login_submission
399 result = yield self._complete_login(
400 user_id, login_submission, create_non_existant_users=True
323401 )
324402 return result
325403
379457 self.cas_displayname_attribute = hs.config.cas_displayname_attribute
380458 self.cas_required_attributes = hs.config.cas_required_attributes
381459 self._sso_auth_handler = SSOAuthHandler(hs)
382 self._http_client = hs.get_simple_http_client()
460 self._http_client = hs.get_proxied_http_client()
383461
384462 @defer.inlineCallbacks
385463 def on_GET(self, request):
1919 from six.moves.urllib import parse as urlparse
2020
2121 from canonicaljson import json
22
23 from twisted.internet import defer
2422
2523 from synapse.api.constants import EventTypes, Membership
2624 from synapse.api.errors import (
8482 set_tag("txn_id", txn_id)
8583 return self.txns.fetch_or_execute_request(request, self.on_POST, request)
8684
87 @defer.inlineCallbacks
88 def on_POST(self, request):
89 requester = yield self.auth.get_user_by_req(request)
90
91 info = yield self._room_creation_handler.create_room(
85 async def on_POST(self, request):
86 requester = await self.auth.get_user_by_req(request)
87
88 info = await self._room_creation_handler.create_room(
9289 requester, self.get_room_config(request)
9390 )
9491
153150 def on_PUT_no_state_key(self, request, room_id, event_type):
154151 return self.on_PUT(request, room_id, event_type, "")
155152
156 @defer.inlineCallbacks
157 def on_GET(self, request, room_id, event_type, state_key):
158 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
153 async def on_GET(self, request, room_id, event_type, state_key):
154 requester = await self.auth.get_user_by_req(request, allow_guest=True)
159155 format = parse_string(
160156 request, "format", default="content", allowed_values=["content", "event"]
161157 )
162158
163159 msg_handler = self.message_handler
164 data = yield msg_handler.get_room_data(
160 data = await msg_handler.get_room_data(
165161 user_id=requester.user.to_string(),
166162 room_id=room_id,
167163 event_type=event_type,
178174 elif format == "content":
179175 return 200, data.get_dict()["content"]
180176
181 @defer.inlineCallbacks
182 def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
183 requester = yield self.auth.get_user_by_req(request)
177 async def on_PUT(self, request, room_id, event_type, state_key, txn_id=None):
178 requester = await self.auth.get_user_by_req(request)
184179
185180 if txn_id:
186181 set_tag("txn_id", txn_id)
199194
200195 if event_type == EventTypes.Member:
201196 membership = content.get("membership", None)
202 event = yield self.room_member_handler.update_membership(
197 event = await self.room_member_handler.update_membership(
203198 requester,
204199 target=UserID.from_string(state_key),
205200 room_id=room_id,
207202 content=content,
208203 )
209204 else:
210 event = yield self.event_creation_handler.create_and_send_nonmember_event(
205 event = await self.event_creation_handler.create_and_send_nonmember_event(
211206 requester, event_dict, txn_id=txn_id
212207 )
213208
230225 PATTERNS = "/rooms/(?P<room_id>[^/]*)/send/(?P<event_type>[^/]*)"
231226 register_txn_path(self, PATTERNS, http_server, with_get=True)
232227
233 @defer.inlineCallbacks
234 def on_POST(self, request, room_id, event_type, txn_id=None):
235 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
228 async def on_POST(self, request, room_id, event_type, txn_id=None):
229 requester = await self.auth.get_user_by_req(request, allow_guest=True)
236230 content = parse_json_object_from_request(request)
237231
238232 event_dict = {
245239 if b"ts" in request.args and requester.app_service:
246240 event_dict["origin_server_ts"] = parse_integer(request, "ts", 0)
247241
248 event = yield self.event_creation_handler.create_and_send_nonmember_event(
242 event = await self.event_creation_handler.create_and_send_nonmember_event(
249243 requester, event_dict, txn_id=txn_id
250244 )
251245
275269 PATTERNS = "/join/(?P<room_identifier>[^/]*)"
276270 register_txn_path(self, PATTERNS, http_server)
277271
278 @defer.inlineCallbacks
279 def on_POST(self, request, room_identifier, txn_id=None):
280 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
272 async def on_POST(self, request, room_identifier, txn_id=None):
273 requester = await self.auth.get_user_by_req(request, allow_guest=True)
281274
282275 try:
283276 content = parse_json_object_from_request(request)
297290 elif RoomAlias.is_valid(room_identifier):
298291 handler = self.room_member_handler
299292 room_alias = RoomAlias.from_string(room_identifier)
300 room_id, remote_room_hosts = yield handler.lookup_room_alias(room_alias)
293 room_id, remote_room_hosts = await handler.lookup_room_alias(room_alias)
301294 room_id = room_id.to_string()
302295 else:
303296 raise SynapseError(
304297 400, "%s was not legal room ID or room alias" % (room_identifier,)
305298 )
306299
307 yield self.room_member_handler.update_membership(
300 await self.room_member_handler.update_membership(
308301 requester=requester,
309302 target=requester.user,
310303 room_id=room_id,
334327 self.hs = hs
335328 self.auth = hs.get_auth()
336329
337 @defer.inlineCallbacks
338 def on_GET(self, request):
330 async def on_GET(self, request):
339331 server = parse_string(request, "server", default=None)
340332
341333 try:
342 yield self.auth.get_user_by_req(request, allow_guest=True)
334 await self.auth.get_user_by_req(request, allow_guest=True)
343335 except InvalidClientCredentialsError as e:
344336 # Option to allow servers to require auth when accessing
345337 # /publicRooms via CS API. This is especially helpful in private
366358
367359 handler = self.hs.get_room_list_handler()
368360 if server:
369 data = yield handler.get_remote_public_room_list(
361 data = await handler.get_remote_public_room_list(
370362 server, limit=limit, since_token=since_token
371363 )
372364 else:
373 data = yield handler.get_local_public_room_list(
365 data = await handler.get_local_public_room_list(
374366 limit=limit, since_token=since_token
375367 )
376368
377369 return 200, data
378370
379 @defer.inlineCallbacks
380 def on_POST(self, request):
381 yield self.auth.get_user_by_req(request, allow_guest=True)
371 async def on_POST(self, request):
372 await self.auth.get_user_by_req(request, allow_guest=True)
382373
383374 server = parse_string(request, "server", default=None)
384375 content = parse_json_object_from_request(request)
407398
408399 handler = self.hs.get_room_list_handler()
409400 if server:
410 data = yield handler.get_remote_public_room_list(
401 data = await handler.get_remote_public_room_list(
411402 server,
412403 limit=limit,
413404 since_token=since_token,
416407 third_party_instance_id=third_party_instance_id,
417408 )
418409 else:
419 data = yield handler.get_local_public_room_list(
410 data = await handler.get_local_public_room_list(
420411 limit=limit,
421412 since_token=since_token,
422413 search_filter=search_filter,
435426 self.message_handler = hs.get_message_handler()
436427 self.auth = hs.get_auth()
437428
438 @defer.inlineCallbacks
439 def on_GET(self, request, room_id):
429 async def on_GET(self, request, room_id):
440430 # TODO support Pagination stream API (limit/tokens)
441 requester = yield self.auth.get_user_by_req(request)
431 requester = await self.auth.get_user_by_req(request)
442432 handler = self.message_handler
443433
444434 # request the state as of a given event, as identified by a stream token,
458448 membership = parse_string(request, "membership")
459449 not_membership = parse_string(request, "not_membership")
460450
461 events = yield handler.get_state_events(
451 events = await handler.get_state_events(
462452 room_id=room_id,
463453 user_id=requester.user.to_string(),
464454 at_token=at_token,
487477 self.message_handler = hs.get_message_handler()
488478 self.auth = hs.get_auth()
489479
490 @defer.inlineCallbacks
491 def on_GET(self, request, room_id):
492 requester = yield self.auth.get_user_by_req(request)
493
494 users_with_profile = yield self.message_handler.get_joined_members(
480 async def on_GET(self, request, room_id):
481 requester = await self.auth.get_user_by_req(request)
482
483 users_with_profile = await self.message_handler.get_joined_members(
495484 requester, room_id
496485 )
497486
507496 self.pagination_handler = hs.get_pagination_handler()
508497 self.auth = hs.get_auth()
509498
510 @defer.inlineCallbacks
511 def on_GET(self, request, room_id):
512 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
499 async def on_GET(self, request, room_id):
500 requester = await self.auth.get_user_by_req(request, allow_guest=True)
513501 pagination_config = PaginationConfig.from_request(request, default_limit=10)
514502 as_client_event = b"raw" not in request.args
515503 filter_bytes = parse_string(request, b"filter", encoding=None)
520508 as_client_event = False
521509 else:
522510 event_filter = None
523 msgs = yield self.pagination_handler.get_messages(
511 msgs = await self.pagination_handler.get_messages(
524512 room_id=room_id,
525513 requester=requester,
526514 pagin_config=pagination_config,
540528 self.message_handler = hs.get_message_handler()
541529 self.auth = hs.get_auth()
542530
543 @defer.inlineCallbacks
544 def on_GET(self, request, room_id):
545 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
531 async def on_GET(self, request, room_id):
532 requester = await self.auth.get_user_by_req(request, allow_guest=True)
546533 # Get all the current state for this room
547 events = yield self.message_handler.get_state_events(
534 events = await self.message_handler.get_state_events(
548535 room_id=room_id,
549536 user_id=requester.user.to_string(),
550537 is_guest=requester.is_guest,
561548 self.initial_sync_handler = hs.get_initial_sync_handler()
562549 self.auth = hs.get_auth()
563550
564 @defer.inlineCallbacks
565 def on_GET(self, request, room_id):
566 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
551 async def on_GET(self, request, room_id):
552 requester = await self.auth.get_user_by_req(request, allow_guest=True)
567553 pagination_config = PaginationConfig.from_request(request)
568 content = yield self.initial_sync_handler.room_initial_sync(
554 content = await self.initial_sync_handler.room_initial_sync(
569555 room_id=room_id, requester=requester, pagin_config=pagination_config
570556 )
571557 return 200, content
583569 self._event_serializer = hs.get_event_client_serializer()
584570 self.auth = hs.get_auth()
585571
586 @defer.inlineCallbacks
587 def on_GET(self, request, room_id, event_id):
588 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
572 async def on_GET(self, request, room_id, event_id):
573 requester = await self.auth.get_user_by_req(request, allow_guest=True)
589574 try:
590 event = yield self.event_handler.get_event(
575 event = await self.event_handler.get_event(
591576 requester.user, room_id, event_id
592577 )
593578 except AuthError:
598583
599584 time_now = self.clock.time_msec()
600585 if event:
601 event = yield self._event_serializer.serialize_event(event, time_now)
586 event = await self._event_serializer.serialize_event(event, time_now)
602587 return 200, event
603588
604589 raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
616601 self._event_serializer = hs.get_event_client_serializer()
617602 self.auth = hs.get_auth()
618603
619 @defer.inlineCallbacks
620 def on_GET(self, request, room_id, event_id):
621 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
604 async def on_GET(self, request, room_id, event_id):
605 requester = await self.auth.get_user_by_req(request, allow_guest=True)
622606
623607 limit = parse_integer(request, "limit", default=10)
624608
630614 else:
631615 event_filter = None
632616
633 results = yield self.room_context_handler.get_event_context(
617 results = await self.room_context_handler.get_event_context(
634618 requester.user, room_id, event_id, limit, event_filter
635619 )
636620
638622 raise SynapseError(404, "Event not found.", errcode=Codes.NOT_FOUND)
639623
640624 time_now = self.clock.time_msec()
641 results["events_before"] = yield self._event_serializer.serialize_events(
625 results["events_before"] = await self._event_serializer.serialize_events(
642626 results["events_before"], time_now
643627 )
644 results["event"] = yield self._event_serializer.serialize_event(
628 results["event"] = await self._event_serializer.serialize_event(
645629 results["event"], time_now
646630 )
647 results["events_after"] = yield self._event_serializer.serialize_events(
631 results["events_after"] = await self._event_serializer.serialize_events(
648632 results["events_after"], time_now
649633 )
650 results["state"] = yield self._event_serializer.serialize_events(
634 results["state"] = await self._event_serializer.serialize_events(
651635 results["state"], time_now
652636 )
653637
664648 PATTERNS = "/rooms/(?P<room_id>[^/]*)/forget"
665649 register_txn_path(self, PATTERNS, http_server)
666650
667 @defer.inlineCallbacks
668 def on_POST(self, request, room_id, txn_id=None):
669 requester = yield self.auth.get_user_by_req(request, allow_guest=False)
670
671 yield self.room_member_handler.forget(user=requester.user, room_id=room_id)
651 async def on_POST(self, request, room_id, txn_id=None):
652 requester = await self.auth.get_user_by_req(request, allow_guest=False)
653
654 await self.room_member_handler.forget(user=requester.user, room_id=room_id)
672655
673656 return 200, {}
674657
695678 )
696679 register_txn_path(self, PATTERNS, http_server)
697680
698 @defer.inlineCallbacks
699 def on_POST(self, request, room_id, membership_action, txn_id=None):
700 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
681 async def on_POST(self, request, room_id, membership_action, txn_id=None):
682 requester = await self.auth.get_user_by_req(request, allow_guest=True)
701683
702684 if requester.is_guest and membership_action not in {
703685 Membership.JOIN,
713695 content = {}
714696
715697 if membership_action == "invite" and self._has_3pid_invite_keys(content):
716 yield self.room_member_handler.do_3pid_invite(
698 await self.room_member_handler.do_3pid_invite(
717699 room_id,
718700 requester.user,
719701 content["medium"],
734716 if "reason" in content and membership_action in ["kick", "ban"]:
735717 event_content = {"reason": content["reason"]}
736718
737 yield self.room_member_handler.update_membership(
719 await self.room_member_handler.update_membership(
738720 requester=requester,
739721 target=target,
740722 room_id=room_id,
776758 PATTERNS = "/rooms/(?P<room_id>[^/]*)/redact/(?P<event_id>[^/]*)"
777759 register_txn_path(self, PATTERNS, http_server)
778760
779 @defer.inlineCallbacks
780 def on_POST(self, request, room_id, event_id, txn_id=None):
781 requester = yield self.auth.get_user_by_req(request)
761 async def on_POST(self, request, room_id, event_id, txn_id=None):
762 requester = await self.auth.get_user_by_req(request)
782763 content = parse_json_object_from_request(request)
783764
784 event = yield self.event_creation_handler.create_and_send_nonmember_event(
765 event = await self.event_creation_handler.create_and_send_nonmember_event(
785766 requester,
786767 {
787768 "type": EventTypes.Redaction,
815796 self.typing_handler = hs.get_typing_handler()
816797 self.auth = hs.get_auth()
817798
818 @defer.inlineCallbacks
819 def on_PUT(self, request, room_id, user_id):
820 requester = yield self.auth.get_user_by_req(request)
799 async def on_PUT(self, request, room_id, user_id):
800 requester = await self.auth.get_user_by_req(request)
821801
822802 room_id = urlparse.unquote(room_id)
823803 target_user = UserID.from_string(urlparse.unquote(user_id))
824804
825805 content = parse_json_object_from_request(request)
826806
827 yield self.presence_handler.bump_presence_active_time(requester.user)
807 await self.presence_handler.bump_presence_active_time(requester.user)
828808
829809 # Limit timeout to stop people from setting silly typing timeouts.
830810 timeout = min(content.get("timeout", 30000), 120000)
831811
832812 if content["typing"]:
833 yield self.typing_handler.started_typing(
813 await self.typing_handler.started_typing(
834814 target_user=target_user,
835815 auth_user=requester.user,
836816 room_id=room_id,
837817 timeout=timeout,
838818 )
839819 else:
840 yield self.typing_handler.stopped_typing(
820 await self.typing_handler.stopped_typing(
841821 target_user=target_user, auth_user=requester.user, room_id=room_id
842822 )
843823
852832 self.handlers = hs.get_handlers()
853833 self.auth = hs.get_auth()
854834
855 @defer.inlineCallbacks
856 def on_POST(self, request):
857 requester = yield self.auth.get_user_by_req(request)
835 async def on_POST(self, request):
836 requester = await self.auth.get_user_by_req(request)
858837
859838 content = parse_json_object_from_request(request)
860839
861840 batch = parse_string(request, "next_batch")
862 results = yield self.handlers.search_handler.search(
841 results = await self.handlers.search_handler.search(
863842 requester.user, content, batch
864843 )
865844
874853 self.store = hs.get_datastore()
875854 self.auth = hs.get_auth()
876855
877 @defer.inlineCallbacks
878 def on_GET(self, request):
879 requester = yield self.auth.get_user_by_req(request, allow_guest=True)
880
881 room_ids = yield self.store.get_rooms_for_user(requester.user.to_string())
856 async def on_GET(self, request):
857 requester = await self.auth.get_user_by_req(request, allow_guest=True)
858
859 room_ids = await self.store.get_rooms_for_user(requester.user.to_string())
882860 return 200, {"joined_rooms": list(room_ids)}
883861
884862
7070 def on_POST(self, request):
7171 if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
7272 if self.config.local_threepid_handling_disabled_due_to_email_config:
73 logger.warn(
73 logger.warning(
7474 "User password resets have been disabled due to lack of email config"
7575 )
7676 raise SynapseError(
147147 self.clock = hs.get_clock()
148148 self.store = hs.get_datastore()
149149 if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
150 self.failure_email_template, = load_jinja2_templates(
150 (self.failure_email_template,) = load_jinja2_templates(
151151 self.config.email_template_dir,
152152 [self.config.email_password_reset_template_failure_html],
153153 )
161161 )
162162 if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
163163 if self.config.local_threepid_handling_disabled_due_to_email_config:
164 logger.warn(
164 logger.warning(
165165 "Password reset emails have been disabled due to lack of an email config"
166166 )
167167 raise SynapseError(
182182 # Perform a 302 redirect if next_link is set
183183 if next_link:
184184 if next_link.startswith("file:///"):
185 logger.warn(
185 logger.warning(
186186 "Not redirecting to next_link as it is a local file: address"
187187 )
188188 else:
349349 def on_POST(self, request):
350350 if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
351351 if self.config.local_threepid_handling_disabled_due_to_email_config:
352 logger.warn(
352 logger.warning(
353353 "Adding emails have been disabled due to lack of an email config"
354354 )
355355 raise SynapseError(
440440 raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)
441441
442442 if not self.hs.config.account_threepid_delegate_msisdn:
443 logger.warn(
443 logger.warning(
444444 "No upstream msisdn account_threepid_delegate configured on the server to "
445445 "handle this request"
446446 )
478478 self.clock = hs.get_clock()
479479 self.store = hs.get_datastore()
480480 if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
481 self.failure_email_template, = load_jinja2_templates(
481 (self.failure_email_template,) = load_jinja2_templates(
482482 self.config.email_template_dir,
483483 [self.config.email_add_threepid_template_failure_html],
484484 )
487487 def on_GET(self, request):
488488 if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
489489 if self.config.local_threepid_handling_disabled_due_to_email_config:
490 logger.warn(
490 logger.warning(
491491 "Adding emails have been disabled due to lack of an email config"
492492 )
493493 raise SynapseError(
514514 # Perform a 302 redirect if next_link is set
515515 if next_link:
516516 if next_link.startswith("file:///"):
517 logger.warn(
517 logger.warning(
518518 "Not redirecting to next_link as it is a local file: address"
519519 )
520520 else:
1313 # limitations under the License.
1414
1515 import logging
16
17 from twisted.internet import defer
1816
1917 from synapse.http.servlet import RestServlet, parse_json_object_from_request
2018
3331 self.read_marker_handler = hs.get_read_marker_handler()
3432 self.presence_handler = hs.get_presence_handler()
3533
36 @defer.inlineCallbacks
37 def on_POST(self, request, room_id):
38 requester = yield self.auth.get_user_by_req(request)
34 async def on_POST(self, request, room_id):
35 requester = await self.auth.get_user_by_req(request)
3936
40 yield self.presence_handler.bump_presence_active_time(requester.user)
37 await self.presence_handler.bump_presence_active_time(requester.user)
4138
4239 body = parse_json_object_from_request(request)
4340
4441 read_event_id = body.get("m.read", None)
4542 if read_event_id:
46 yield self.receipts_handler.received_client_receipt(
43 await self.receipts_handler.received_client_receipt(
4744 room_id,
4845 "m.read",
4946 user_id=requester.user.to_string(),
5249
5350 read_marker_event_id = body.get("m.fully_read", None)
5451 if read_marker_event_id:
55 yield self.read_marker_handler.received_client_read_marker(
52 await self.read_marker_handler.received_client_read_marker(
5653 room_id,
5754 user_id=requester.user.to_string(),
5855 event_id=read_marker_event_id,
1313 # limitations under the License.
1414
1515 import logging
16
17 from twisted.internet import defer
1816
1917 from synapse.api.errors import SynapseError
2018 from synapse.http.servlet import RestServlet
3836 self.receipts_handler = hs.get_receipts_handler()
3937 self.presence_handler = hs.get_presence_handler()
4038
41 @defer.inlineCallbacks
42 def on_POST(self, request, room_id, receipt_type, event_id):
43 requester = yield self.auth.get_user_by_req(request)
39 async def on_POST(self, request, room_id, receipt_type, event_id):
40 requester = await self.auth.get_user_by_req(request)
4441
4542 if receipt_type != "m.read":
4643 raise SynapseError(400, "Receipt type must be 'm.read'")
4744
48 yield self.presence_handler.bump_presence_active_time(requester.user)
45 await self.presence_handler.bump_presence_active_time(requester.user)
4946
50 yield self.receipts_handler.received_client_receipt(
47 await self.receipts_handler.received_client_receipt(
5148 room_id, receipt_type, user_id=requester.user.to_string(), event_id=event_id
5249 )
5350
105105 def on_POST(self, request):
106106 if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
107107 if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
108 logger.warn(
108 logger.warning(
109109 "Email registration has been disabled due to lack of email config"
110110 )
111111 raise SynapseError(
206206 )
207207
208208 if not self.hs.config.account_threepid_delegate_msisdn:
209 logger.warn(
209 logger.warning(
210210 "No upstream msisdn account_threepid_delegate configured on the server to "
211211 "handle this request"
212212 )
246246 self.store = hs.get_datastore()
247247
248248 if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
249 self.failure_email_template, = load_jinja2_templates(
249 (self.failure_email_template,) = load_jinja2_templates(
250250 self.config.email_template_dir,
251251 [self.config.email_registration_template_failure_html],
252252 )
253253
254254 if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
255 self.failure_email_template, = load_jinja2_templates(
255 (self.failure_email_template,) = load_jinja2_templates(
256256 self.config.email_template_dir,
257257 [self.config.email_registration_template_failure_html],
258258 )
265265 )
266266 if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
267267 if self.config.local_threepid_handling_disabled_due_to_email_config:
268 logger.warn(
268 logger.warning(
269269 "User registration via email has been disabled due to lack of email config"
270270 )
271271 raise SynapseError(
286286 # Perform a 302 redirect if next_link is set
287287 if next_link:
288288 if next_link.startswith("file:///"):
289 logger.warn(
289 logger.warning(
290290 "Not redirecting to next_link as it is a local file: address"
291291 )
292292 else:
479479 # a password to work around a client bug where it sent
480480 # the 'initial_device_display_name' param alone, wiping out
481481 # the original registration params
482 logger.warn("Ignoring initial_device_display_name without password")
482 logger.warning("Ignoring initial_device_display_name without password")
483483 del body["initial_device_display_name"]
484484
485485 session_id = self.auth_handler.get_session_id(body)
111111 full_state = parse_boolean(request, "full_state", default=False)
112112
113113 logger.debug(
114 "/sync: user=%r, timeout=%r, since=%r,"
115 " set_presence=%r, filter_id=%r, device_id=%r"
116 % (user, timeout, since, set_presence, filter_id, device_id)
114 "/sync: user=%r, timeout=%r, since=%r, "
115 "set_presence=%r, filter_id=%r, device_id=%r",
116 user,
117 timeout,
118 since,
119 set_presence,
120 filter_id,
121 device_id,
117122 )
118123
119124 request_key = (user, timeout, since, filter_id, full_state, device_id)
388393 # We've had bug reports that events were coming down under the
389394 # wrong room.
390395 if event.room_id != room.room_id:
391 logger.warn(
396 logger.warning(
392397 "Event %r is under room %r instead of %r",
393398 event.event_id,
394399 room.room_id,
6464 "m.require_identity_server": False,
6565 # as per MSC2290
6666 "m.separate_add_and_bind": True,
67 # Implements support for label-based filtering as described in
68 # MSC2326.
69 "org.matrix.label_based_filtering": True,
6770 },
6871 },
6972 )
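This hunk advertises MSC2326 label-based filtering as an unstable feature, which appears to be the client-server versions endpoint's `unstable_features` map. A hedged client-side sketch of checking the flag (function name is hypothetical):

def supports_label_filtering(versions_response):
    # look for the MSC2326 flag advertised above in the unstable_features
    # map of a /_matrix/client/versions response
    return versions_response.get("unstable_features", {}).get(
        "org.matrix.label_based_filtering", False
    )

assert supports_label_filtering(
    {"unstable_features": {"org.matrix.label_based_filtering": True}}
)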
101101 @wrap_json_request_handler
102102 async def _async_render_GET(self, request):
103103 if len(request.postpath) == 1:
104 server, = request.postpath
104 (server,) = request.postpath
105105 query = {server.decode("ascii"): {}}
106106 elif len(request.postpath) == 2:
107107 server, key_id = request.postpath
362362 },
363363 )
364364 except RequestSendFailed as e:
365 logger.warn(
365 logger.warning(
366366 "Request failed fetching remote media %s/%s: %r",
367367 server_name,
368368 media_id,
371371 raise SynapseError(502, "Failed to fetch remote media")
372372
373373 except HttpResponseException as e:
374 logger.warn(
374 logger.warning(
375375 "HTTP error fetching remote media %s/%s: %s",
376376 server_name,
377377 media_id,
382382 raise SynapseError(502, "Failed to fetch remote media")
383383
384384 except SynapseError:
385 logger.warn("Failed to fetch remote media %s/%s", server_name, media_id)
385 logger.warning(
386 "Failed to fetch remote media %s/%s", server_name, media_id
387 )
386388 raise
387389 except NotRetryingDestination:
388 logger.warn("Not retrying destination %r", server_name)
390 logger.warning("Not retrying destination %r", server_name)
389391 raise SynapseError(502, "Failed to fetch remote media")
390392 except Exception:
391393 logger.exception(
690692 try:
691693 os.remove(full_path)
692694 except OSError as e:
693 logger.warn("Failed to remove file: %r", full_path)
695 logger.warning("Failed to remove file: %r", full_path)
694696 if e.errno == errno.ENOENT:
695697 pass
696698 else:
7676 treq_args={"browser_like_redirects": True},
7777 ip_whitelist=hs.config.url_preview_ip_range_whitelist,
7878 ip_blacklist=hs.config.url_preview_ip_range_blacklist,
79 http_proxy=os.getenv("http_proxy"),
80 https_proxy=os.getenv("HTTPS_PROXY"),
7981 )
8082 self.media_repo = media_repo
8183 self.primary_base_path = media_repo.primary_base_path
119121 pattern = entry[attrib]
120122 value = getattr(url_tuple, attrib)
121123 logger.debug(
122 ("Matching attrib '%s' with value '%s' against" " pattern '%s'")
123 % (attrib, value, pattern)
124 "Matching attrib '%s' with value '%s' against" " pattern '%s'",
125 attrib,
126 value,
127 pattern,
124128 )
125129
126130 if value is None:
136140 match = False
137141 continue
138142 if match:
139 logger.warn("URL %s blocked by url_blacklist entry %s", url, entry)
143 logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
140144 raise SynapseError(
141145 403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
142146 )
188192
189193 media_info = yield self._download_url(url, user)
190194
191 logger.debug("got media_info of '%s'" % media_info)
195 logger.debug("got media_info of '%s'", media_info)
192196
193197 if _is_media(media_info["media_type"]):
194198 file_id = media_info["filesystem_id"]
208212 og["og:image:width"] = dims["width"]
209213 og["og:image:height"] = dims["height"]
210214 else:
211 logger.warn("Couldn't get dims for %s" % url)
215 logger.warning("Couldn't get dims for %s" % url)
212216
213217 # define our OG response for this media
214218 elif _is_html(media_info["media_type"]):
256260 og["og:image:width"] = dims["width"]
257261 og["og:image:height"] = dims["height"]
258262 else:
259 logger.warn("Couldn't get dims for %s" % og["og:image"])
263 logger.warning("Couldn't get dims for %s", og["og:image"])
260264
261265 og["og:image"] = "mxc://%s/%s" % (
262266 self.server_name,
267271 else:
268272 del og["og:image"]
269273 else:
270 logger.warn("Failed to find any OG data in %s", url)
274 logger.warning("Failed to find any OG data in %s", url)
271275 og = {}
272276
273277 # filter out any stupidly long values
282286 for k in keys_to_remove:
283287 del og[k]
284288
285 logger.debug("Calculated OG for %s as %s" % (url, og))
289 logger.debug("Calculated OG for %s as %s", url, og)
286290
287291 jsonog = json.dumps(og)
288292
311315
312316 with self.media_storage.store_into_file(file_info) as (f, fname, finish):
313317 try:
314 logger.debug("Trying to get url '%s'" % url)
318 logger.debug("Trying to get url '%s'", url)
315319 length, headers, uri, code = yield self.client.get_file(
316320 url, output_stream=f, max_size=self.max_spider_size
317321 )
331335 )
332336 except Exception as e:
333337 # FIXME: pass through 404s and other error messages nicely
334 logger.warn("Error downloading %s: %r", url, e)
338 logger.warning("Error downloading %s: %r", url, e)
335339
336340 raise SynapseError(
337341 500,
412416 except OSError as e:
413417 # If the path doesn't exist, meh
414418 if e.errno != errno.ENOENT:
415 logger.warn("Failed to remove media: %r: %s", media_id, e)
419 logger.warning("Failed to remove media: %r: %s", media_id, e)
416420 continue
417421
418422 removed_media.append(media_id)
444448 except OSError as e:
445449 # If the path doesn't exist, meh
446450 if e.errno != errno.ENOENT:
447 logger.warn("Failed to remove media: %r: %s", media_id, e)
451 logger.warning("Failed to remove media: %r: %s", media_id, e)
448452 continue
449453
450454 try:
460464 except OSError as e:
461465 # If the path doesn't exist, meh
462466 if e.errno != errno.ENOENT:
463 logger.warn("Failed to remove media: %r: %s", media_id, e)
467 logger.warning("Failed to remove media: %r: %s", media_id, e)
464468 continue
465469
466470 removed_media.append(media_id)
181181 if file_path:
182182 yield respond_with_file(request, desired_type, file_path)
183183 else:
184 logger.warn("Failed to generate thumbnail")
184 logger.warning("Failed to generate thumbnail")
185185 respond_404(request)
186186
187187 @defer.inlineCallbacks
244244 if file_path:
245245 yield respond_with_file(request, desired_type, file_path)
246246 else:
247 logger.warn("Failed to generate thumbnail")
247 logger.warning("Failed to generate thumbnail")
248248 respond_404(request)
249249
250250 @defer.inlineCallbacks
2222 # Imports required for the default HomeServer() implementation
2323 import abc
2424 import logging
25 import os
2526
2627 from twisted.enterprise import adbapi
2728 from twisted.mail.smtp import sendmail
9495 WorkerServerNoticesSender,
9596 )
9697 from synapse.state import StateHandler, StateResolutionHandler
98 from synapse.storage import DataStores, Storage
9799 from synapse.streams.events import EventSources
98100 from synapse.util import Clock
99101 from synapse.util.distributor import Distributor
166168 "filtering",
167169 "http_client_context_factory",
168170 "simple_http_client",
171 "proxied_http_client",
169172 "media_repository",
170173 "media_repository_resource",
171174 "federation_transport_client",
195198 "account_validity_handler",
196199 "saml_handler",
197200 "event_client_serializer",
201 "storage",
198202 ]
199203
200204 REQUIRED_ON_MASTER_STARTUP = ["user_directory_handler", "stats_handler"]
216220 self.hostname = hostname
217221 self._building = {}
218222 self._listening_services = []
223 self.start_time = None
219224
220225 self.clock = Clock(reactor)
221226 self.distributor = Distributor()
223228 self.admin_redaction_ratelimiter = Ratelimiter()
224229 self.registration_ratelimiter = Ratelimiter()
225230
226 self.datastore = None
231 self.datastores = None
227232
228233 # Other kwargs are explicit dependencies
229234 for depname in kwargs:
232237 def setup(self):
233238 logger.info("Setting up.")
234239 with self.get_db_conn() as conn:
235 self.datastore = self.DATASTORE_CLASS(conn, self)
240 datastore = self.DATASTORE_CLASS(conn, self)
241 self.datastores = DataStores(datastore, conn, self)
236242 conn.commit()
243 self.start_time = int(self.get_clock().time())
237244 logger.info("Finished setting up.")
238245
239246 def setup_master(self):
265272 return self.clock
266273
267274 def get_datastore(self):
268 return self.datastore
275 return self.datastores.main
269276
270277 def get_config(self):
271278 return self.config
306313
307314 def build_simple_http_client(self):
308315 return SimpleHttpClient(self)
316
317 def build_proxied_http_client(self):
318 return SimpleHttpClient(
319 self,
320 http_proxy=os.getenv("http_proxy"),
321 https_proxy=os.getenv("HTTPS_PROXY"),
322 )
309323
310324 def build_room_creation_handler(self):
311325 return RoomCreationHandler(self)
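The new `"proxied_http_client"` dependency follows the homeserver's `build_*`/`get_*` convention: a memoised `get_proxied_http_client()` is derived from `build_proxied_http_client()`, which reads the proxy endpoints from environment variables. A toy model of that convention (the caching machinery here is a simplification of what `HomeServer` actually does):

import os

class HomeServerSketch:
    """Toy model: each declared dependency gets a memoized get_<name>()
    that calls build_<name>() exactly once."""

    def __init__(self):
        self._cache = {}

    def _get(self, name):
        if name not in self._cache:
            self._cache[name] = getattr(self, "build_" + name)()
        return self._cache[name]

    def build_proxied_http_client(self):
        # mirrors the lookups above: only the lowercase http_proxy and
        # uppercase HTTPS_PROXY spellings are consulted by this code path
        return ("client", os.getenv("http_proxy"), os.getenv("HTTPS_PROXY"))

    def get_proxied_http_client(self):
        return self._get("proxied_http_client")

hs = HomeServerSketch()
assert hs.get_proxied_http_client() is hs.get_proxied_http_client()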
536550 def build_event_client_serializer(self):
537551 return EventClientSerializer(self)
538552
553 def build_storage(self) -> Storage:
554 return Storage(self, self.datastores)
555
539556 def remove_pusher(self, app_id, push_key, user_id):
540557 return self.get_pusherpool().remove_pusher(app_id, push_key, user_id)
541558
1111 import synapse.handlers.room
1212 import synapse.handlers.room_member
1313 import synapse.handlers.set_password
14 import synapse.http.client
1415 import synapse.rest.media.v1.media_repository
1516 import synapse.server_notices.server_notices_manager
1617 import synapse.server_notices.server_notices_sender
3738 pass
3839 def get_state_resolution_handler(self) -> synapse.state.StateResolutionHandler:
3940 pass
41 def get_simple_http_client(self) -> synapse.http.client.SimpleHttpClient:
42 """Fetch an HTTP client implementation which doesn't do any blacklisting
43 or support any HTTP_PROXY settings"""
44 pass
45 def get_proxied_http_client(self) -> synapse.http.client.SimpleHttpClient:
46 """Fetch an HTTP client implementation which doesn't do any blacklisting
47 but does support HTTP_PROXY settings"""
48 pass
4049 def get_deactivate_account_handler(
41 self
50 self,
4251 ) -> synapse.handlers.deactivate_account.DeactivateAccountHandler:
4352 pass
4453 def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:
4655 def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
4756 pass
4857 def get_event_creation_handler(
49 self
58 self,
5059 ) -> synapse.handlers.message.EventCreationHandler:
5160 pass
5261 def get_set_password_handler(
53 self
62 self,
5463 ) -> synapse.handlers.set_password.SetPasswordHandler:
5564 pass
5665 def get_federation_sender(self) -> synapse.federation.sender.FederationSender:
5766 pass
5867 def get_federation_transport_client(
59 self
68 self,
6069 ) -> synapse.federation.transport.client.TransportLayerClient:
6170 pass
6271 def get_media_repository_resource(
63 self
72 self,
6473 ) -> synapse.rest.media.v1.media_repository.MediaRepositoryResource:
6574 pass
6675 def get_media_repository(
67 self
76 self,
6877 ) -> synapse.rest.media.v1.media_repository.MediaRepository:
6978 pass
7079 def get_server_notices_manager(
71 self
80 self,
7281 ) -> synapse.server_notices.server_notices_manager.ServerNoticesManager:
7382 pass
7483 def get_server_notices_sender(
75 self
84 self,
7685 ) -> synapse.server_notices.server_notices_sender.ServerNoticesSender:
7786 pass
8282 room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)
8383
8484 if not room_id:
85 logger.warn("Failed to get server notices room")
85 logger.warning("Failed to get server notices room")
8686 return
8787
8888 yield self._check_and_set_tags(user_id, room_id)
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15
16 from twisted.internet import defer
17
18 from synapse.storage.state import StateFilter
19
20 logger = logging.getLogger(__name__)
21
22
23 class SpamCheckerApi(object):
24 """A proxy object that gets passed to spam checkers so they can get
25 access to rooms and other relevant information.
26 """
27
28 def __init__(self, hs):
29 self.hs = hs
30
31 self._store = hs.get_datastore()
32
33 @defer.inlineCallbacks
34 def get_state_events_in_room(self, room_id, types):
35 """Gets state events for the given room.
36
37 Args:
38 room_id (string): The room ID to get state events in.
39 types (tuple): The event type and state key (using None
40 to represent 'any') of the room state to acquire.
41
42 Returns:
43 twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
44 The filtered state events in the room.
45 """
46 state_ids = yield self._store.get_filtered_current_state_ids(
47 room_id=room_id, state_filter=StateFilter.from_types(types)
48 )
49 state = yield self._store.get_events(state_ids.values())
50 return state.values()
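A spam-checker module could use this proxy as follows; the helper below is hypothetical, but it calls `get_state_events_in_room` exactly as documented above, filtering on the `m.room.name` type with its empty state key:

from twisted.internet import defer

@defer.inlineCallbacks
def room_name_from_state(api, room_id):
    # hypothetical spam-checker helper built on SpamCheckerApi
    events = yield api.get_state_events_in_room(room_id, [("m.room.name", "")])
    names = [ev.content.get("name") for ev in events]
    return names[0] if names else None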
1515
1616 import logging
1717 from collections import namedtuple
18 from typing import Iterable, Optional
1819
1920 from six import iteritems, itervalues
2021
2627
2728 from synapse.api.constants import EventTypes
2829 from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions
30 from synapse.events import EventBase
2931 from synapse.events.snapshot import EventContext
3032 from synapse.logging.utils import log_function
3133 from synapse.state import v1, v2
102104 def __init__(self, hs):
103105 self.clock = hs.get_clock()
104106 self.store = hs.get_datastore()
107 self.state_store = hs.get_storage().state
105108 self.hs = hs
106109 self._state_resolution_handler = hs.get_state_resolution_handler()
107110
210213 return joined_hosts
211214
212215 @defer.inlineCallbacks
213 def compute_event_context(self, event, old_state=None):
216 def compute_event_context(
217 self, event: EventBase, old_state: Optional[Iterable[EventBase]] = None
218 ):
214219 """Build an EventContext structure for the event.
215220
216221 This works out what the current state should be for the event, and
217222 generates a new state group if necessary.
218223
219224 Args:
220 event (synapse.events.EventBase):
221 old_state (dict|None): The state at the event if it can't be
225 event:
226 old_state: The state at the event if it can't be
222227 calculated from existing events. This is normally only specified
223228 when receiving an event from federation where we don't have the
224229 prev events for, e.g. when backfilling.
230235 # If this is an outlier, then we know it shouldn't have any current
231236 # state. Certainly store.get_current_state won't return any, and
232237 # persisting the event won't store the state group.
238
239 # FIXME: why do we populate current_state_ids? I thought the point was
240 # that we weren't supposed to have any state for outliers?
233241 if old_state:
234242 prev_state_ids = {(s.type, s.state_key): s.event_id for s in old_state}
235243 if event.is_state():
246254 # group for it.
247255 context = EventContext.with_state(
248256 state_group=None,
257 state_group_before_event=None,
249258 current_state_ids=current_state_ids,
250259 prev_state_ids=prev_state_ids,
251260 )
252261
253262 return context
254263
264 #
265 # first of all, figure out the state before the event
266 #
267
255268 if old_state:
256 # We already have the state, so we don't need to calculate it.
257 # Let's just correctly fill out the context and create a
258 # new state group for it.
259
260 prev_state_ids = {(s.type, s.state_key): s.event_id for s in old_state}
261
262 if event.is_state():
263 key = (event.type, event.state_key)
264 if key in prev_state_ids:
265 replaces = prev_state_ids[key]
266 if replaces != event.event_id: # Paranoia check
267 event.unsigned["replaces_state"] = replaces
268 current_state_ids = dict(prev_state_ids)
269 current_state_ids[key] = event.event_id
270 else:
271 current_state_ids = prev_state_ids
272
273 state_group = yield self.store.store_state_group(
269 # if we're given the state before the event, then we use that
270 state_ids_before_event = {
271 (s.type, s.state_key): s.event_id for s in old_state
272 }
273 state_group_before_event = None
274 state_group_before_event_prev_group = None
275 deltas_to_state_group_before_event = None
276
277 else:
278 # otherwise, we'll need to resolve the state across the prev_events.
279 logger.debug("calling resolve_state_groups from compute_event_context")
280
281 entry = yield self.resolve_state_groups_for_events(
282 event.room_id, event.prev_event_ids()
283 )
284
285 state_ids_before_event = entry.state
286 state_group_before_event = entry.state_group
287 state_group_before_event_prev_group = entry.prev_group
288 deltas_to_state_group_before_event = entry.delta_ids
289
290 #
291 # make sure that we have a state group at that point. If it's not a state event,
292 # that will be the state group for the new event. If it *is* a state event,
293 # it might get rejected (in which case we'll need to persist it with the
294 # previous state group)
295 #
296
297 if not state_group_before_event:
298 state_group_before_event = yield self.state_store.store_state_group(
274299 event.event_id,
275300 event.room_id,
276 prev_group=None,
277 delta_ids=None,
278 current_state_ids=current_state_ids,
301 prev_group=state_group_before_event_prev_group,
302 delta_ids=deltas_to_state_group_before_event,
303 current_state_ids=state_ids_before_event,
279304 )
280305
281 context = EventContext.with_state(
282 state_group=state_group,
283 current_state_ids=current_state_ids,
284 prev_state_ids=prev_state_ids,
306 # XXX: can we update the state cache entry for the new state group? or
307 # could we set a flag on resolve_state_groups_for_events to tell it to
308 # always make a state group?
309
310 #
311 # now if it's not a state event, we're done
312 #
313
314 if not event.is_state():
315 return EventContext.with_state(
316 state_group_before_event=state_group_before_event,
317 state_group=state_group_before_event,
318 current_state_ids=state_ids_before_event,
319 prev_state_ids=state_ids_before_event,
320 prev_group=state_group_before_event_prev_group,
321 delta_ids=deltas_to_state_group_before_event,
285322 )
286323
287 return context
288
289 logger.debug("calling resolve_state_groups from compute_event_context")
290
291 entry = yield self.resolve_state_groups_for_events(
292 event.room_id, event.prev_event_ids()
293 )
294
295 prev_state_ids = entry.state
296 prev_group = None
297 delta_ids = None
298
299 if event.is_state():
300 # If this is a state event then we need to create a new state
301 # group for the state after this event.
302
303 key = (event.type, event.state_key)
304 if key in prev_state_ids:
305 replaces = prev_state_ids[key]
324 #
325 # otherwise, we'll need to create a new state group for after the event
326 #
327
328 key = (event.type, event.state_key)
329 if key in state_ids_before_event:
330 replaces = state_ids_before_event[key]
331 if replaces != event.event_id:
306332 event.unsigned["replaces_state"] = replaces
307333
308 current_state_ids = dict(prev_state_ids)
309 current_state_ids[key] = event.event_id
310
311 if entry.state_group:
312 # If the state at the event has a state group assigned then
313 # we can use that as the prev group
314 prev_group = entry.state_group
315 delta_ids = {key: event.event_id}
316 elif entry.prev_group:
317 # If the state at the event only has a prev group, then we can
318 # use that as a prev group too.
319 prev_group = entry.prev_group
320 delta_ids = dict(entry.delta_ids)
321 delta_ids[key] = event.event_id
322
323 state_group = yield self.store.store_state_group(
324 event.event_id,
325 event.room_id,
326 prev_group=prev_group,
327 delta_ids=delta_ids,
328 current_state_ids=current_state_ids,
329 )
330 else:
331 current_state_ids = prev_state_ids
332 prev_group = entry.prev_group
333 delta_ids = entry.delta_ids
334
335 if entry.state_group is None:
336 entry.state_group = yield self.store.store_state_group(
337 event.event_id,
338 event.room_id,
339 prev_group=entry.prev_group,
340 delta_ids=entry.delta_ids,
341 current_state_ids=current_state_ids,
342 )
343 entry.state_id = entry.state_group
344
345 state_group = entry.state_group
346
347 context = EventContext.with_state(
348 state_group=state_group,
349 current_state_ids=current_state_ids,
350 prev_state_ids=prev_state_ids,
351 prev_group=prev_group,
334 state_ids_after_event = dict(state_ids_before_event)
335 state_ids_after_event[key] = event.event_id
336 delta_ids = {key: event.event_id}
337
338 state_group_after_event = yield self.state_store.store_state_group(
339 event.event_id,
340 event.room_id,
341 prev_group=state_group_before_event,
352342 delta_ids=delta_ids,
353 )
354
355 return context
343 current_state_ids=state_ids_after_event,
344 )
345
346 return EventContext.with_state(
347 state_group=state_group_after_event,
348 state_group_before_event=state_group_before_event,
349 current_state_ids=state_ids_after_event,
350 prev_state_ids=state_ids_before_event,
351 prev_group=state_group_before_event,
352 delta_ids=delta_ids,
353 )
356354
357355 @measure_func()
358356 @defer.inlineCallbacks
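The rewritten `compute_event_context` makes the two-phase structure explicit: first work out (or accept) the state *before* the event and guarantee it has a state group; then, only for state events, fork a second state group that adds the event itself as a one-entry delta. A condensed sketch of the control flow (simplified: it ignores outliers and the prev-group bookkeeping, and `resolve`/`store_state_group` stand in for `resolve_state_groups_for_events` and `state_store.store_state_group`):

def compute_event_context_sketch(event, old_state, resolve, store_state_group):
    # 1. state before the event
    if old_state is not None:
        state_before = {(s.type, s.state_key): s.event_id for s in old_state}
        group_before = None
    else:
        entry = resolve(event.room_id, event.prev_event_ids())
        state_before, group_before = entry.state, entry.state_group

    # make sure the before-state has a state group
    if not group_before:
        group_before = store_state_group(event.event_id, event.room_id, state_before)

    # 2. non-state events simply share the before-group
    if not event.is_state():
        return group_before, state_before

    # 3. state events get a new group: the before-state plus this event
    state_after = dict(state_before)
    state_after[(event.type, event.state_key)] = event.event_id
    group_after = store_state_group(event.event_id, event.room_id, state_after)
    return group_after, state_after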
375373 # map from state group id to the state in that state group (where
376374 # 'state' is a map from state key to event id)
377375 # dict[int, dict[(str, str), str]]
378 state_groups_ids = yield self.store.get_state_groups_ids(room_id, event_ids)
376 state_groups_ids = yield self.state_store.get_state_groups_ids(
377 room_id, event_ids
378 )
379379
380380 if len(state_groups_ids) == 0:
381381 return _StateCacheEntry(state={}, state_group=None)
382382 elif len(state_groups_ids) == 1:
383383 name, state_list = list(state_groups_ids.items()).pop()
384384
385 prev_group, delta_ids = yield self.store.get_state_group_delta(name)
385 prev_group, delta_ids = yield self.state_store.get_state_group_delta(name)
386386
387387 return _StateCacheEntry(
388388 state=state_list,
2626 stored in `synapse.storage.schema`.
2727 """
2828
29 from synapse.storage.data_stores.main import DataStore # noqa: F401
29 from synapse.storage.data_stores import DataStores
30 from synapse.storage.data_stores.main import DataStore
31 from synapse.storage.persist_events import EventsPersistenceStorage
32 from synapse.storage.purge_events import PurgeEventsStorage
33 from synapse.storage.state import StateGroupStorage
34
35 __all__ = ["DataStores", "DataStore"]
36
37
38 class Storage(object):
39 """The high level interfaces for talking to various storage layers.
40 """
41
42 def __init__(self, hs, stores: DataStores):
43 # We include the main data store here mainly so that we don't have to
44 # rewrite all the existing code to split it into high vs low level
45 # interfaces.
46 self.main = stores.main
47
48 self.persistence = EventsPersistenceStorage(hs, stores)
49 self.purge_events = PurgeEventsStorage(hs, stores)
50 self.state = StateGroupStorage(hs, stores)
3051
3152
3253 def are_all_users_on_domain(txn, database_engine, domain):
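The new `Storage` object is a facade: it keeps `stores.main` reachable for legacy callers while layering the higher-level `persistence`, `purge_events` and `state` interfaces on top of the low-level data stores. A minimal sketch of the pattern with placeholder classes (not the real store types):

class MainStore:
    """Stand-in for the low-level main data store."""

class StateGroupStorageSketch:
    """Stand-in for a high-level interface built on the stores."""
    def __init__(self, stores):
        self.stores = stores

class DataStoresSketch:
    def __init__(self, main_store):
        self.main = main_store

class StorageSketch:
    """Facade: old code keeps using .main, new code targets .state etc."""
    def __init__(self, stores):
        self.main = stores.main
        self.state = StateGroupStorageSketch(stores)

storage = StorageSketch(DataStoresSketch(MainStore()))
assert storage.main is storage.state.stores.main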
360360 expiration_ts,
361361 )
362362
363 self._simple_insert_txn(
363 self._simple_upsert_txn(
364364 txn,
365365 "account_validity",
366 values={
367 "user_id": user_id,
368 "expiration_ts_ms": expiration_ts,
369 "email_sent": False,
370 },
366 keyvalues={"user_id": user_id},
367 values={"expiration_ts_ms": expiration_ts, "email_sent": False},
371368 )
372369
373370 def start_profiling(self):
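Switching `_simple_insert_txn` to `_simple_upsert_txn` makes renewal idempotent: if an `account_validity` row already exists for the user it is updated in place rather than raising a unique-constraint error. A self-contained sketch of the equivalent SQL against SQLite (this is illustrative, not the exact SQL Synapse generates; `ON CONFLICT ... DO UPDATE` needs SQLite >= 3.24):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute(
    "CREATE TABLE account_validity "
    "(user_id TEXT PRIMARY KEY, expiration_ts_ms BIGINT, email_sent BOOLEAN)"
)

def set_expiration(user_id, expiration_ts):
    # upsert: update the row if user_id exists, insert otherwise
    conn.execute(
        "INSERT INTO account_validity (user_id, expiration_ts_ms, email_sent) "
        "VALUES (?, ?, 0) "
        "ON CONFLICT (user_id) DO UPDATE SET "
        "expiration_ts_ms = excluded.expiration_ts_ms, email_sent = 0",
        (user_id, expiration_ts),
    )

set_expiration("@alice:example.com", 1000)
set_expiration("@alice:example.com", 2000)  # no IntegrityError the second time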
493490 exception_callbacks = []
494491
495492 if LoggingContext.current_context() == LoggingContext.sentinel:
496 logger.warn("Starting db txn '%s' from sentinel context", desc)
493 logger.warning("Starting db txn '%s' from sentinel context", desc)
497494
498495 try:
499496 result = yield self.runWithConnection(
531528 """
532529 parent_context = LoggingContext.current_context()
533530 if parent_context == LoggingContext.sentinel:
534 logger.warn(
531 logger.warning(
535532 "Starting db connection from sentinel context: metrics will be lost"
536533 )
537534 parent_context = None
718715 raise
719716
720717 # presumably we raced with another transaction: let's retry.
721 logger.warn(
718 logger.warning(
722719 "IntegrityError when upserting into %s; retrying: %s", table, e
723720 )
724721
9393 self._all_done = False
9494
9595 def start_doing_background_updates(self):
96 run_as_background_process("background_updates", self._run_background_updates)
96 run_as_background_process("background_updates", self.run_background_updates)
9797
9898 @defer.inlineCallbacks
99 def _run_background_updates(self):
99 def run_background_updates(self, sleep=True):
100100 logger.info("Starting background schema updates")
101101 while True:
102 yield self.hs.get_clock().sleep(self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0)
102 if sleep:
103 yield self.hs.get_clock().sleep(
104 self.BACKGROUND_UPDATE_INTERVAL_MS / 1000.0
105 )
103106
104107 try:
105108 result = yield self.do_next_background_update(
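Making the update loop public with a `sleep=True` flag lets one-off tools (such as the `update_database` script exercised in CI) drain every pending update immediately, while the normal server path keeps waiting `BACKGROUND_UPDATE_INTERVAL_MS` between batches. A toy model of the two modes (names simplified, not the real class):

import time

class UpdaterSketch:
    """Toy model of the background-update loop above."""
    INTERVAL_MS = 1000

    def __init__(self, updates):
        self._updates = list(updates)

    def run_background_updates(self, sleep=True):
        while self._updates:
            if sleep:
                # rate-limit in normal server operation
                time.sleep(self.INTERVAL_MS / 1000.0)
            self._updates.pop(0)()

# a maintenance script can drain everything without waiting:
UpdaterSketch([lambda: None]).run_background_updates(sleep=False)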
1111 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
14
15
16 class DataStores(object):
17 """The various data stores.
18
19 These are low level interfaces to physical databases.
20 """
21
22 def __init__(self, main_store, db_conn, hs):
23 # Note we pass in the main store here as workers use a different main
24 # store.
25 self.main = main_store
138138 db_conn, "public_room_list_stream", "stream_id"
139139 )
140140 self._device_list_id_gen = StreamIdGenerator(
141 db_conn, "device_lists_stream", "stream_id"
141 db_conn,
142 "device_lists_stream",
143 "stream_id",
144 extra_tables=[("user_signature_stream", "stream_id")],
142145 )
143146 self._cross_signing_id_gen = StreamIdGenerator(
144147 db_conn, "e2e_cross_signing_keys", "stream_id"
316319 ) u
317320 """
318321 txn.execute(sql, (time_from,))
319 count, = txn.fetchone()
322 (count,) = txn.fetchone()
320323 return count
321324
322325 def count_r30_users(self):
395398
396399 txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))
397400
398 count, = txn.fetchone()
401 (count,) = txn.fetchone()
399402 results["all"] = count
400403
401404 return results
357357 def _add_messages_to_local_device_inbox_txn(
358358 self, txn, stream_id, messages_by_user_then_device
359359 ):
360 sql = "UPDATE device_max_stream_id" " SET stream_id = ?" " WHERE stream_id < ?"
361 txn.execute(sql, (stream_id, stream_id))
360 # Compatible method of performing an upsert
361 sql = "SELECT stream_id FROM device_max_stream_id"
362
363 txn.execute(sql)
364 rows = txn.fetchone()
365 if rows:
366 db_stream_id = rows[0]
367 if db_stream_id < stream_id:
368 # Insert the new stream_id
369 sql = "UPDATE device_max_stream_id SET stream_id = ?"
370 else:
371 # No rows, perform an insert
372 sql = "INSERT INTO device_max_stream_id (stream_id) VALUES (?)"
373
374 txn.execute(sql, (stream_id,))
362375
363376 local_by_user_then_device = {}
364377 for user_id, messages_by_device in messages_by_user_then_device.items():
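The new code replaces a bare `UPDATE` (which silently does nothing when the table is empty) with a portable read-then-write upsert on the single-row `device_max_stream_id` table. A self-contained sketch of the same logic against SQLite:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE device_max_stream_id (stream_id BIGINT NOT NULL)")

def bump_max_stream_id(txn, stream_id):
    row = txn.execute("SELECT stream_id FROM device_max_stream_id").fetchone()
    if row:
        if row[0] < stream_id:
            # existing row with a lower value: move it forward
            txn.execute("UPDATE device_max_stream_id SET stream_id = ?", (stream_id,))
    else:
        # empty table: the old bare UPDATE would have been a no-op here
        txn.execute("INSERT INTO device_max_stream_id (stream_id) VALUES (?)", (stream_id,))

bump_max_stream_id(conn, 5)
bump_max_stream_id(conn, 3)  # lower id: leaves 5 in place
assert conn.execute("SELECT stream_id FROM device_max_stream_id").fetchone() == (5,)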
3636 make_in_list_sql_clause,
3737 )
3838 from synapse.storage.background_updates import BackgroundUpdateStore
39 from synapse.types import get_verify_key_from_cross_signing_key
3940 from synapse.util import batch_iter
4041 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks, cachedList
4142
8990
9091 @trace
9192 @defer.inlineCallbacks
92 def get_devices_by_remote(self, destination, from_stream_id, limit):
93 """Get stream of updates to send to remote servers
94
95 Returns:
96 Deferred[tuple[int, list[dict]]]:
93 def get_device_updates_by_remote(self, destination, from_stream_id, limit):
94 """Get a stream of device updates to send to the given remote server.
95
96 Args:
97 destination (str): The host the device updates are intended for
98 from_stream_id (int): The minimum stream_id to filter updates by, exclusive
99 limit (int): Maximum number of device updates to return
100 Returns:
101 Deferred[tuple[int, list[tuple[string,dict]]]]:
97102 current stream id (ie, the stream id of the last update included in the
98 response), and the list of updates
103 response), and the list of updates, where each update is a pair of EDU
104 type and EDU contents
99105 """
100106 now_stream_id = self._device_list_id_gen.get_current_token()
101107
116122 # stream_id; the rationale being that such a large device list update
117123 # is likely an error.
118124 updates = yield self.runInteraction(
119 "get_devices_by_remote",
120 self._get_devices_by_remote_txn,
125 "get_device_updates_by_remote",
126 self._get_device_updates_by_remote_txn,
121127 destination,
122128 from_stream_id,
123129 now_stream_id,
127133 # Return an empty list if there are no updates
128134 if not updates:
129135 return now_stream_id, []
136
137 # get the cross-signing keys of the users in the list, so that we can
138 # determine which of the device changes were cross-signing keys
139 users = set(r[0] for r in updates)
140 master_key_by_user = {}
141 self_signing_key_by_user = {}
142 for user in users:
143 cross_signing_key = yield self.get_e2e_cross_signing_key(user, "master")
144 if cross_signing_key:
145 key_id, verify_key = get_verify_key_from_cross_signing_key(
146 cross_signing_key
147 )
148 # verify_key is a VerifyKey from signedjson, which uses
149 # .version to denote the portion of the key ID after the
150 # algorithm and colon, which is the device ID
151 master_key_by_user[user] = {
152 "key_info": cross_signing_key,
153 "device_id": verify_key.version,
154 }
155
156 cross_signing_key = yield self.get_e2e_cross_signing_key(
157 user, "self_signing"
158 )
159 if cross_signing_key:
160 key_id, verify_key = get_verify_key_from_cross_signing_key(
161 cross_signing_key
162 )
163 self_signing_key_by_user[user] = {
164 "key_info": cross_signing_key,
165 "device_id": verify_key.version,
166 }
130167
131168 # if we have exceeded the limit, we need to exclude any results with the
132169 # same stream_id as the last row.
152189 # context which created the Edu.
153190
154191 query_map = {}
155 for update in updates:
156 if stream_id_cutoff is not None and update[2] >= stream_id_cutoff:
192 cross_signing_keys_by_user = {}
193 for user_id, device_id, update_stream_id, update_context in updates:
194 if stream_id_cutoff is not None and update_stream_id >= stream_id_cutoff:
157195 # Stop processing updates
158196 break
159197
160 key = (update[0], update[1])
161
162 update_context = update[3]
163 update_stream_id = update[2]
164
165 previous_update_stream_id, _ = query_map.get(key, (0, None))
166
167 if update_stream_id > previous_update_stream_id:
168 query_map[key] = (update_stream_id, update_context)
198 if (
199 user_id in master_key_by_user
200 and device_id == master_key_by_user[user_id]["device_id"]
201 ):
202 result = cross_signing_keys_by_user.setdefault(user_id, {})
203 result["master_key"] = master_key_by_user[user_id]["key_info"]
204 elif (
205 user_id in self_signing_key_by_user
206 and device_id == self_signing_key_by_user[user_id]["device_id"]
207 ):
208 result = cross_signing_keys_by_user.setdefault(user_id, {})
209 result["self_signing_key"] = self_signing_key_by_user[user_id][
210 "key_info"
211 ]
212 else:
213 key = (user_id, device_id)
214
215 previous_update_stream_id, _ = query_map.get(key, (0, None))
216
217 if update_stream_id > previous_update_stream_id:
218 query_map[key] = (update_stream_id, update_context)
169219
170220 # If we didn't find any updates with a stream_id lower than the cutoff, it
171221 # means that there are more than limit updates all of which have the same
175225 # devices, in which case E2E isn't going to work well anyway. We'll just
176226 # skip that stream_id and return an empty list, and continue with the next
177227 # stream_id next time.
178 if not query_map:
228 if not query_map and not cross_signing_keys_by_user:
179229 return stream_id_cutoff, []
180230
181231 results = yield self._get_device_update_edus_by_remote(
182232 destination, from_stream_id, query_map
183233 )
184234
235 # add the updated cross-signing keys to the results list
236 for user_id, result in iteritems(cross_signing_keys_by_user):
237 result["user_id"] = user_id
238 # FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec
239 results.append(("org.matrix.signing_key_update", result))
240
185241 return now_stream_id, results
186242
187 def _get_devices_by_remote_txn(
243 def _get_device_updates_by_remote_txn(
188244 self, txn, destination, from_stream_id, now_stream_id, limit
189245 ):
190246 """Return device update information for a given remote destination
199255 Returns:
200256 List: List of device updates
201257 """
258 # get the list of device updates that need to be sent
202259 sql = """
203260 SELECT user_id, device_id, stream_id, opentracing_context FROM device_lists_outbound_pokes
204261 WHERE destination = ? AND ? < stream_id AND stream_id <= ? AND sent = ?
224281 List[Dict]: List of objects representing a device update EDU
225282
226283 """
227 devices = yield self.runInteraction(
228 "_get_e2e_device_keys_txn",
229 self._get_e2e_device_keys_txn,
230 query_map.keys(),
231 include_all_devices=True,
232 include_deleted_devices=True,
284 devices = (
285 yield self.runInteraction(
286 "_get_e2e_device_keys_txn",
287 self._get_e2e_device_keys_txn,
288 query_map.keys(),
289 include_all_devices=True,
290 include_deleted_devices=True,
291 )
292 if query_map
293 else {}
233294 )
234295
235296 results = []
261322 else:
262323 result["deleted"] = True
263324
264 results.append(result)
325 results.append(("m.device_list_update", result))
265326
266327 return results
267328
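With this change `get_device_updates_by_remote` yields `(edu_type, content)` pairs, so one stream can interleave ordinary `m.device_list_update` EDUs with the interim `org.matrix.signing_key_update` EDUs for cross-signing keys. A toy consumer of the new shape (the sender loop here is hypothetical):

def send_device_updates(transaction_queue, updates):
    # edu_type is "m.device_list_update" for ordinary device changes, or
    # "org.matrix.signing_key_update" for cross-signing key changes
    for edu_type, content in updates:
        transaction_queue.append({"edu_type": edu_type, "content": content})

queue = []
send_device_updates(
    queue, [("m.device_list_update", {"user_id": "@a:x", "device_id": "D1"})]
)
assert queue[0]["edu_type"] == "m.device_list_update"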
320320 def _delete_e2e_room_keys_version_txn(txn):
321321 if version is None:
322322 this_version = self._get_current_version(txn, user_id)
323 if this_version is None:
324 raise StoreError(404, "No current backup version")
323325 else:
324326 this_version = version
327
328 self._simple_delete_txn(
329 txn,
330 table="e2e_room_keys",
331 keyvalues={"user_id": user_id, "version": this_version},
332 )
325333
326334 return self._simple_update_one_txn(
327335 txn,
312312 user_id,
313313 key_type,
314314 from_user_id,
315 )
316
317 def get_all_user_signature_changes_for_remotes(self, from_key, to_key):
318 """Return a list of changes from the user signature stream to notify remotes.
319 Note that the user signature stream represents when a user signs their
320 device with their user-signing key, which is not published to other
321 users or servers, so no `destination` is needed in the returned
322 list. However, this is needed to poke workers.
323
324 Args:
325 from_key (int): the stream ID to start at (exclusive)
326 to_key (int): the stream ID to end at (inclusive)
327
328 Returns:
329 Deferred[list[(int,str)]] a list of `(stream_id, user_id)`
330 """
331 sql = """
332 SELECT MAX(stream_id) AS stream_id, from_user_id AS user_id
333 FROM user_signature_stream
334 WHERE ? < stream_id AND stream_id <= ?
335 GROUP BY user_id
336 """
337 return self._execute(
338 "get_all_user_signature_changes_for_remotes", None, sql, from_key, to_key
315339 )
316340
317341
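The `MAX(stream_id) ... GROUP BY user_id` query collapses multiple signature uploads by the same user into a single `(stream_id, user_id)` row, keeping only the newest position per user inside the `(from_key, to_key]` window. A self-contained illustration of that SQL on SQLite:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE user_signature_stream (stream_id BIGINT, from_user_id TEXT)")
conn.executemany(
    "INSERT INTO user_signature_stream VALUES (?, ?)",
    [(1, "@a:x"), (2, "@a:x"), (3, "@b:x")],
)

rows = conn.execute(
    "SELECT MAX(stream_id) AS stream_id, from_user_id AS user_id "
    "FROM user_signature_stream "
    "WHERE ? < stream_id AND stream_id <= ? "
    "GROUP BY user_id",
    (0, 3),
).fetchall()
assert sorted(rows) == [(2, "@a:x"), (3, "@b:x")]  # one row per user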
363363 )
364364
365365 def _get_backfill_events(self, txn, room_id, event_list, limit):
366 logger.debug(
367 "_get_backfill_events: %s, %s, %s", room_id, repr(event_list), limit
368 )
366 logger.debug("_get_backfill_events: %s, %r, %s", room_id, event_list, limit)
369367
370368 event_results = set()
371369
862862 )
863863 stream_row = txn.fetchone()
864864 if stream_row:
865 offset_stream_ordering, = stream_row
865 (offset_stream_ordering,) = stream_row
866866 rotate_to_stream_ordering = min(
867867 self.stream_ordering_day_ago, offset_stream_ordering
868868 )
1616
1717 import itertools
1818 import logging
19 from collections import Counter as c_counter, OrderedDict, deque, namedtuple
19 from collections import Counter as c_counter, OrderedDict, namedtuple
2020 from functools import wraps
2121
2222 from six import iteritems, text_type
2323 from six.moves import range
2424
2525 from canonicaljson import json
26 from prometheus_client import Counter, Histogram
26 from prometheus_client import Counter
2727
2828 from twisted.internet import defer
2929
3030 import synapse.metrics
31 from synapse.api.constants import EventTypes
31 from synapse.api.constants import EventContentFields, EventTypes
3232 from synapse.api.errors import SynapseError
3333 from synapse.events import EventBase # noqa: F401
3434 from synapse.events.snapshot import EventContext # noqa: F401
3535 from synapse.events.utils import prune_event_dict
36 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
3736 from synapse.logging.utils import log_function
3837 from synapse.metrics import BucketCollector
3938 from synapse.metrics.background_process_metrics import run_as_background_process
40 from synapse.state import StateResolutionStore
4139 from synapse.storage._base import make_in_list_sql_clause
4240 from synapse.storage.background_updates import BackgroundUpdateStore
4341 from synapse.storage.data_stores.main.event_federation import EventFederationStore
4543 from synapse.storage.data_stores.main.state import StateGroupWorkerStore
4644 from synapse.types import RoomStreamToken, get_domain_from_id
4745 from synapse.util import batch_iter
48 from synapse.util.async_helpers import ObservableDeferred
4946 from synapse.util.caches.descriptors import cached, cachedInlineCallbacks
5047 from synapse.util.frozenutils import frozendict_json_encoder
51 from synapse.util.metrics import Measure
5248
5349 logger = logging.getLogger(__name__)
5450
5753 "synapse_storage_events_persisted_events_sep",
5854 "",
5955 ["type", "origin_type", "origin_entity"],
60 )
61
62 # The number of times we are recalculating the current state
63 state_delta_counter = Counter("synapse_storage_events_state_delta", "")
64
65 # The number of times we are recalculating state when there is only a
66 # single forward extremity
67 state_delta_single_event_counter = Counter(
68 "synapse_storage_events_state_delta_single_event", ""
69 )
70
71 # The number of times we are recalculating state when we could have reasonably
72 # calculated the delta when we calculated the state for an event we were
73 # persisting.
74 state_delta_reuse_delta_counter = Counter(
75 "synapse_storage_events_state_delta_reuse_delta", ""
76 )
77
78 # The number of forward extremities for each new event.
79 forward_extremities_counter = Histogram(
80 "synapse_storage_events_forward_extremities_persisted",
81 "Number of forward extremities for each new event",
82 buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
83 )
84
85 # The number of stale forward extremities for each new event. Stale extremities
86 # are those that were in the previous set of extremities as well as the new.
87 stale_forward_extremities_counter = Histogram(
88 "synapse_storage_events_stale_forward_extremities_persisted",
89 "Number of unchanged forward extremities for each new event",
90 buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
9156 )
9257
9358
10166 return out
10267
10368
104 class _EventPeristenceQueue(object):
105 """Queues up events so that they can be persisted in bulk with only one
106 concurrent transaction per room.
107 """
108
109 _EventPersistQueueItem = namedtuple(
110 "_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
111 )
112
113 def __init__(self):
114 self._event_persist_queues = {}
115 self._currently_persisting_rooms = set()
116
117 def add_to_queue(self, room_id, events_and_contexts, backfilled):
118 """Add events to the queue, with the given persist_event options.
119
120 NB: due to the normal usage pattern of this method, it does *not*
121 follow the synapse logcontext rules, and leaves the logcontext in
122 place whether or not the returned deferred is ready.
123
124 Args:
125 room_id (str):
126 events_and_contexts (list[(EventBase, EventContext)]):
127 backfilled (bool):
128
129 Returns:
130 defer.Deferred: a deferred which will resolve once the events are
131 persisted. Runs its callbacks *without* a logcontext.
132 """
133 queue = self._event_persist_queues.setdefault(room_id, deque())
134 if queue:
135 # if the last item in the queue has the same `backfilled` setting,
136 # we can just add these new events to that item.
137 end_item = queue[-1]
138 if end_item.backfilled == backfilled:
139 end_item.events_and_contexts.extend(events_and_contexts)
140 return end_item.deferred.observe()
141
142 deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
143
144 queue.append(
145 self._EventPersistQueueItem(
146 events_and_contexts=events_and_contexts,
147 backfilled=backfilled,
148 deferred=deferred,
149 )
150 )
151
152 return deferred.observe()
153
154 def handle_queue(self, room_id, per_item_callback):
155 """Attempts to handle the queue for a room if not already being handled.
156
157 The given callback will be invoked for each item in the queue,
158 of type _EventPersistQueueItem. The per_item_callback will continuously
159 be called with new items, unless the queue becomes empty. The return
160 value of the function will be given to the deferreds waiting on the item;
161 exceptions will be passed to the deferreds as well.
162
163 This function should therefore be called whenever anything is added
164 to the queue.
165
166 If another callback is currently handling the queue then it will not be
167 invoked.
168 """
169
170 if room_id in self._currently_persisting_rooms:
171 return
172
173 self._currently_persisting_rooms.add(room_id)
174
175 @defer.inlineCallbacks
176 def handle_queue_loop():
177 try:
178 queue = self._get_drainining_queue(room_id)
179 for item in queue:
180 try:
181 ret = yield per_item_callback(item)
182 except Exception:
183 with PreserveLoggingContext():
184 item.deferred.errback()
185 else:
186 with PreserveLoggingContext():
187 item.deferred.callback(ret)
188 finally:
189 queue = self._event_persist_queues.pop(room_id, None)
190 if queue:
191 self._event_persist_queues[room_id] = queue
192 self._currently_persisting_rooms.discard(room_id)
193
194 # set handle_queue_loop off in the background
195 run_as_background_process("persist_events", handle_queue_loop)
196
197 def _get_drainining_queue(self, room_id):
198 queue = self._event_persist_queues.setdefault(room_id, deque())
199
200 try:
201 while True:
202 yield queue.popleft()
203 except IndexError:
204 # Queue has been drained.
205 pass
206
207
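The queue removed here, together with the `persist_events`/`persist_event` entry points below, moves out of `EventsStore`; the `EventsPersistenceStorage` wired up earlier via `Storage.persistence` now owns this orchestration. The core idea is per-room serialisation: at most one batch of events is persisted per room at a time. A synchronous toy version of that idea (the real one dispatches `handle_queue_loop` as a background process):

from collections import deque

class PerRoomQueueSketch:
    """Toy per-room persistence queue: one batch per room at a time."""
    def __init__(self, process_batch):
        self._queues = {}
        self._busy = set()
        self._process = process_batch

    def add(self, room_id, events):
        self._queues.setdefault(room_id, deque()).append(events)
        if room_id not in self._busy:
            self._busy.add(room_id)
            try:
                q = self._queues[room_id]
                while q:
                    self._process(room_id, q.popleft())
            finally:
                self._busy.discard(room_id)

batches = []
q = PerRoomQueueSketch(lambda room, evs: batches.append((room, evs)))
q.add("!room:x", ["event1"])
assert batches == [("!room:x", ["event1"])]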
20869 _EventCacheEntry = namedtuple("_EventCacheEntry", ("event", "redacted_event"))
20970
21071
22081 @defer.inlineCallbacks
22182 def f(self, *args, **kwargs):
22283 try:
223 res = yield func(self, *args, **kwargs)
84 res = yield func(self, *args, delete_existing=False, **kwargs)
22485 except self.database_engine.module.IntegrityError:
22586 logger.exception("IntegrityError, retrying.")
22687 res = yield func(self, *args, delete_existing=True, **kwargs)
240101 def __init__(self, db_conn, hs):
241102 super(EventsStore, self).__init__(db_conn, hs)
242103
243 self._event_persist_queue = _EventPeristenceQueue()
244 self._state_resolution_handler = hs.get_state_resolution_handler()
245
246104 # Collect metrics on the number of forward extremities that exist.
247105 # Counter of number of extremities to count
248106 self._current_forward_extremities_amount = c_counter()
285143 res = yield self.runInteraction("read_forward_extremities", fetch)
286144 self._current_forward_extremities_amount = c_counter(list(x[0] for x in res))
287145
288 @defer.inlineCallbacks
289 def persist_events(self, events_and_contexts, backfilled=False):
290 """
291 Write events to the database
292 Args:
293 events_and_contexts: list of tuples of (event, context)
294 backfilled (bool): Whether the results are retrieved from federation
295 via backfill or not. Used to determine if they're "new" events
296 which might update the current state etc.
297
298 Returns:
299 Deferred[int]: the stream ordering of the latest persisted event
300 """
301 partitioned = {}
302 for event, ctx in events_and_contexts:
303 partitioned.setdefault(event.room_id, []).append((event, ctx))
304
305 deferreds = []
306 for room_id, evs_ctxs in iteritems(partitioned):
307 d = self._event_persist_queue.add_to_queue(
308 room_id, evs_ctxs, backfilled=backfilled
309 )
310 deferreds.append(d)
311
312 for room_id in partitioned:
313 self._maybe_start_persisting(room_id)
314
315 yield make_deferred_yieldable(
316 defer.gatherResults(deferreds, consumeErrors=True)
317 )
318
319 max_persisted_id = yield self._stream_id_gen.get_current_token()
320
321 return max_persisted_id
322
323 @defer.inlineCallbacks
324 @log_function
325 def persist_event(self, event, context, backfilled=False):
326 """
327
328 Args:
329 event (EventBase):
330 context (EventContext):
331 backfilled (bool):
332
333 Returns:
334 Deferred: resolves to (int, int): the stream ordering of ``event``,
335 and the stream ordering of the latest persisted event
336 """
337 deferred = self._event_persist_queue.add_to_queue(
338 event.room_id, [(event, context)], backfilled=backfilled
339 )
340
341 self._maybe_start_persisting(event.room_id)
342
343 yield make_deferred_yieldable(deferred)
344
345 max_persisted_id = yield self._stream_id_gen.get_current_token()
346 return (event.internal_metadata.stream_ordering, max_persisted_id)
347
348 def _maybe_start_persisting(self, room_id):
349 @defer.inlineCallbacks
350 def persisting_queue(item):
351 with Measure(self._clock, "persist_events"):
352 yield self._persist_events(
353 item.events_and_contexts, backfilled=item.backfilled
354 )
355
356 self._event_persist_queue.handle_queue(room_id, persisting_queue)
357
358146 @_retry_on_integrity_error
359147 @defer.inlineCallbacks
360 def _persist_events(
361 self, events_and_contexts, backfilled=False, delete_existing=False
148 def _persist_events_and_state_updates(
149 self,
150 events_and_contexts,
151 current_state_for_room,
152 state_delta_for_room,
153 new_forward_extremeties,
154 backfilled=False,
155 delete_existing=False,
362156 ):
363 """Persist events to db
157 """Persist a set of events alongside updates to the current state and
158 forward extremities tables.
364159
365160 Args:
366161 events_and_contexts (list[(EventBase, EventContext)]):
367 backfilled (bool):
162 current_state_for_room (dict[str, dict]): Map from room_id to the
163 current state of the room based on forward extremities
164 state_delta_for_room (dict[str, tuple]): Map from room_id to tuple
165 of `(to_delete, to_insert)` where to_delete is a list
166 of type/state keys to remove from current state, and to_insert
167 is a map (type,key)->event_id giving the state delta in each
168 room.
169 new_forward_extremities (dict[str, list[str]]): Map from room_id
170 to list of event IDs that are the new forward extremities of
171 the room.
172 backfilled (bool)
368173 delete_existing (bool):
369174
370175 Returns:
371176 Deferred: resolves when the events have been persisted
372177 """
373 if not events_and_contexts:
374 return
375
376 chunks = [
377 events_and_contexts[x : x + 100]
378 for x in range(0, len(events_and_contexts), 100)
379 ]
380
381 for chunk in chunks:
382 # We can't easily parallelize these since different chunks
383 # might contain the same event. :(
384
385 # NB: Assumes that we are only persisting events for one room
386 # at a time.
387
388 # map room_id->list[event_ids] giving the new forward
389 # extremities in each room
390 new_forward_extremeties = {}
391
392 # map room_id->(type,state_key)->event_id tracking the full
393 # state in each room after adding these events.
394 # This is simply used to prefill the get_current_state_ids
395 # cache
396 current_state_for_room = {}
397
398 # map room_id->(to_delete, to_insert) where to_delete is a list
399 # of type/state keys to remove from current state, and to_insert
400 # is a map (type,key)->event_id giving the state delta in each
401 # room
402 state_delta_for_room = {}
178
179 # We want to calculate the stream orderings as late as possible, as
180 # we only notify after all events with a lesser stream ordering have
181 # been persisted. I.e. if we spend 10s inside the with block then
182 # that will delay all subsequent events from being notified about.
183 # Hence why we do it down here rather than wrapping the entire
184 # function.
185 #
186 # Its safe to do this after calculating the state deltas etc as we
187 # only need to protect the *persistence* of the events. This is to
188 # ensure that queries of the form "fetch events since X" don't
189 # return events and stream positions after events that are still in
190 # flight, as otherwise subsequent requests "fetch event since Y"
191 # will not return those events.
192 #
193 # Note: Multiple instances of this function cannot be in flight at
194 # the same time for the same room.
195 if backfilled:
196 stream_ordering_manager = self._backfill_id_gen.get_next_mult(
197 len(events_and_contexts)
198 )
199 else:
200 stream_ordering_manager = self._stream_id_gen.get_next_mult(
201 len(events_and_contexts)
202 )
203
204 with stream_ordering_manager as stream_orderings:
205 for (event, context), stream in zip(events_and_contexts, stream_orderings):
206 event.internal_metadata.stream_ordering = stream
207
208 yield self.runInteraction(
209 "persist_events",
210 self._persist_events_txn,
211 events_and_contexts=events_and_contexts,
212 backfilled=backfilled,
213 delete_existing=delete_existing,
214 state_delta_for_room=state_delta_for_room,
215 new_forward_extremeties=new_forward_extremeties,
216 )
217 persist_event_counter.inc(len(events_and_contexts))
403218
404219 if not backfilled:
405 with Measure(self._clock, "_calculate_state_and_extrem"):
406 # Work out the new "current state" for each room.
407 # We do this by working out what the new extremities are and then
408 # calculating the state from that.
409 events_by_room = {}
410 for event, context in chunk:
411 events_by_room.setdefault(event.room_id, []).append(
412 (event, context)
413 )
414
415 for room_id, ev_ctx_rm in iteritems(events_by_room):
416 latest_event_ids = yield self.get_latest_event_ids_in_room(
417 room_id
418 )
419 new_latest_event_ids = yield self._calculate_new_extremities(
420 room_id, ev_ctx_rm, latest_event_ids
421 )
422
423 latest_event_ids = set(latest_event_ids)
424 if new_latest_event_ids == latest_event_ids:
425 # No change in extremities, so no change in state
426 continue
427
428 # there should always be at least one forward extremity.
429 # (except during the initial persistence of the send_join
430 # results, in which case there will be no existing
431 # extremities, so we'll `continue` above and skip this bit.)
432 assert new_latest_event_ids, "No forward extremities left!"
433
434 new_forward_extremeties[room_id] = new_latest_event_ids
435
436 len_1 = (
437 len(latest_event_ids) == 1
438 and len(new_latest_event_ids) == 1
439 )
440 if len_1:
441 all_single_prev_not_state = all(
442 len(event.prev_event_ids()) == 1
443 and not event.is_state()
444 for event, ctx in ev_ctx_rm
445 )
446 # Don't bother calculating state if they're just
447 # a long chain of single ancestor non-state events.
448 if all_single_prev_not_state:
449 continue
450
451 state_delta_counter.inc()
452 if len(new_latest_event_ids) == 1:
453 state_delta_single_event_counter.inc()
454
455 # This is a fairly handwavey check to see if we could
456 # have guessed what the delta would have been when
457 # processing one of these events.
458 # What we're interested in is if the latest extremities
459 # were the same when we created the event as they are
460 # now. When this server creates a new event (as opposed
461 # to receiving it over federation) it will use the
462 # forward extremities as the prev_events, so we can
463 # guess this by looking at the prev_events and checking
464 # if they match the current forward extremities.
465 for ev, _ in ev_ctx_rm:
466 prev_event_ids = set(ev.prev_event_ids())
467 if latest_event_ids == prev_event_ids:
468 state_delta_reuse_delta_counter.inc()
469 break
470
471 logger.info("Calculating state delta for room %s", room_id)
472 with Measure(
473 self._clock, "persist_events.get_new_state_after_events"
474 ):
475 res = yield self._get_new_state_after_events(
476 room_id,
477 ev_ctx_rm,
478 latest_event_ids,
479 new_latest_event_ids,
480 )
481 current_state, delta_ids = res
482
483 # If either are not None then there has been a change,
484 # and we need to work out the delta (or use that
485 # given)
486 if delta_ids is not None:
487 # If there is a delta we know that we've
488 # only added or replaced state, never
489 # removed keys entirely.
490 state_delta_for_room[room_id] = ([], delta_ids)
491 elif current_state is not None:
492 with Measure(
493 self._clock, "persist_events.calculate_state_delta"
494 ):
495 delta = yield self._calculate_state_delta(
496 room_id, current_state
497 )
498 state_delta_for_room[room_id] = delta
499
500 # If we have the current_state then lets prefill
501 # the cache with it.
502 if current_state is not None:
503 current_state_for_room[room_id] = current_state
504
505 # We want to calculate the stream orderings as late as possible, as
506 # we only notify after all events with a lesser stream ordering have
507 # been persisted. I.e. if we spend 10s inside the with block then
508 # that will delay all subsequent events from being notified about.
509 # Hence why we do it down here rather than wrapping the entire
510 # function.
511 #
512 # Its safe to do this after calculating the state deltas etc as we
513 # only need to protect the *persistence* of the events. This is to
514 # ensure that queries of the form "fetch events since X" don't
515 # return events and stream positions after events that are still in
516 # flight, as otherwise subsequent requests "fetch event since Y"
517 # will not return those events.
518 #
519 # Note: Multiple instances of this function cannot be in flight at
520 # the same time for the same room.
521 if backfilled:
522 stream_ordering_manager = self._backfill_id_gen.get_next_mult(
523 len(chunk)
220 # backfilled events have negative stream orderings, so we don't
221 # want to set the event_persisted_position to that.
222 synapse.metrics.event_persisted_position.set(
223 events_and_contexts[-1][0].internal_metadata.stream_ordering
524224 )
525 else:
526 stream_ordering_manager = self._stream_id_gen.get_next_mult(len(chunk))
527
528 with stream_ordering_manager as stream_orderings:
529 for (event, context), stream in zip(chunk, stream_orderings):
530 event.internal_metadata.stream_ordering = stream
531
532 yield self.runInteraction(
533 "persist_events",
534 self._persist_events_txn,
535 events_and_contexts=chunk,
536 backfilled=backfilled,
537 delete_existing=delete_existing,
538 state_delta_for_room=state_delta_for_room,
539 new_forward_extremeties=new_forward_extremeties,
225
226 for event, context in events_and_contexts:
227 if context.app_service:
228 origin_type = "local"
229 origin_entity = context.app_service.id
230 elif self.hs.is_mine_id(event.sender):
231 origin_type = "local"
232 origin_entity = "*client*"
233 else:
234 origin_type = "remote"
235 origin_entity = get_domain_from_id(event.sender)
236
237 event_counter.labels(event.type, origin_type, origin_entity).inc()
238
239 for room_id, new_state in iteritems(current_state_for_room):
240 self.get_current_state_ids.prefill((room_id,), new_state)
241
242 for room_id, latest_event_ids in iteritems(new_forward_extremeties):
243 self.get_latest_event_ids_in_room.prefill(
244 (room_id,), list(latest_event_ids)
540245 )
541 persist_event_counter.inc(len(chunk))
542
543 if not backfilled:
544 # backfilled events have negative stream orderings, so we don't
545 # want to set the event_persisted_position to that.
546 synapse.metrics.event_persisted_position.set(
547 chunk[-1][0].internal_metadata.stream_ordering
548 )
549
550 for event, context in chunk:
551 if context.app_service:
552 origin_type = "local"
553 origin_entity = context.app_service.id
554 elif self.hs.is_mine_id(event.sender):
555 origin_type = "local"
556 origin_entity = "*client*"
557 else:
558 origin_type = "remote"
559 origin_entity = get_domain_from_id(event.sender)
560
561 event_counter.labels(event.type, origin_type, origin_entity).inc()
562
563 for room_id, new_state in iteritems(current_state_for_room):
564 self.get_current_state_ids.prefill((room_id,), new_state)
565
566 for room_id, latest_event_ids in iteritems(new_forward_extremeties):
567 self.get_latest_event_ids_in_room.prefill(
568 (room_id,), list(latest_event_ids)
569 )
570
571 @defer.inlineCallbacks
572 def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
573 """Calculates the new forward extremities for a room given events to
574 persist.
575
576 Assumes that we are only persisting events for one room at a time.
577 """
578
579 # we're only interested in new events which aren't outliers and which aren't
580 # being rejected.
581 new_events = [
582 event
583 for event, ctx in event_contexts
584 if not event.internal_metadata.is_outlier()
585 and not ctx.rejected
586 and not event.internal_metadata.is_soft_failed()
587 ]
588
589 latest_event_ids = set(latest_event_ids)
590
591 # start with the existing forward extremities
592 result = set(latest_event_ids)
593
594 # add all the new events to the list
595 result.update(event.event_id for event in new_events)
596
597 # Now remove all events which are prev_events of any of the new events
598 result.difference_update(
599 e_id for event in new_events for e_id in event.prev_event_ids()
600 )
601
602 # Remove any events which are prev_events of any existing events.
603 existing_prevs = yield self._get_events_which_are_prevs(result)
604 result.difference_update(existing_prevs)
605
606 # Finally handle the case where the new events have soft-failed prev
607 # events. If they do we need to remove them and their prev events,
608 # otherwise we end up with dangling extremities.
609 existing_prevs = yield self._get_prevs_before_rejected(
610 e_id for event in new_events for e_id in event.prev_event_ids()
611 )
612 result.difference_update(existing_prevs)
613
614 # We only update metrics for events that change forward extremities
615 # (e.g. we ignore backfill/outliers/etc)
616 if result != latest_event_ids:
617 forward_extremities_counter.observe(len(result))
618 stale = latest_event_ids & result
619 stale_forward_extremities_counter.observe(len(stale))
620
621 return result
622246
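The extremity calculation above is plain set algebra. A toy rerun of its main steps (events reduced to dicts, the soft-failure pass omitted) for a two-event chain extending a single existing extremity:

# Existing extremity $A; new events: $B extends $A, $C extends $B.
latest_event_ids = {"$A"}
new_events = [
    {"event_id": "$B", "prev_events": ["$A"]},
    {"event_id": "$C", "prev_events": ["$B"]},
]

result = set(latest_event_ids)                    # start from the old extremities
result.update(e["event_id"] for e in new_events)  # add the new events
result.difference_update(                         # drop anything now referenced as a prev_event
    p for e in new_events for p in e["prev_events"]
)
assert result == {"$C"}  # only the tip of the new chain survives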
623247 @defer.inlineCallbacks
624248 def _get_events_which_are_prevs(self, event_ids):
723347 )
724348
725349 return existing_prevs
726
727 @defer.inlineCallbacks
728 def _get_new_state_after_events(
729 self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
730 ):
731 """Calculate the current state dict after adding some new events to
732 a room
733
734 Args:
735 room_id (str):
736 room to which the events are being added. Used for logging etc
737
738 events_context (list[(EventBase, EventContext)]):
739 events and contexts which are being added to the room
740
741 old_latest_event_ids (iterable[str]):
742 the old forward extremities for the room.
743
744 new_latest_event_ids (iterable[str]):
745 the new forward extremities for the room.
746
747 Returns:
748 Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
749 Returns a tuple of two state maps, the first being the full new current
750 state and the second being the delta to the existing current state.
751 If both are None then there has been no change.
752
753 # If there has been a change then we only return the delta if it's
754 already been calculated. Conversely if we do know the delta then
755 the new current state is only returned if we've already calculated
756 it.
757 """
758 # map from state_group to ((type, key) -> event_id) state map
759 state_groups_map = {}
760
761 # Map from (prev state group, new state group) -> delta state dict
762 state_group_deltas = {}
763
764 for ev, ctx in events_context:
765 if ctx.state_group is None:
766 # This should only happen for outlier events.
767 if not ev.internal_metadata.is_outlier():
768 raise Exception(
769 "Context for new event %s has no state "
770 "group" % (ev.event_id,)
771 )
772 continue
773
774 if ctx.state_group in state_groups_map:
775 continue
776
777 # We're only interested in pulling out state that has already
778 # been cached in the context. We'll pull stuff out of the DB later
779 # if necessary.
780 current_state_ids = ctx.get_cached_current_state_ids()
781 if current_state_ids is not None:
782 state_groups_map[ctx.state_group] = current_state_ids
783
784 if ctx.prev_group:
785 state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
786
787 # We need to map the event_ids to their state groups. First, let's
788 # check if the event is one we're persisting, in which case we can
789 # pull the state group from its context.
790 # Otherwise we need to pull the state group from the database.
791
792 # Set of events we need to fetch groups for. (We know none of the old
793 # extremities are going to be in events_context).
794 missing_event_ids = set(old_latest_event_ids)
795
796 event_id_to_state_group = {}
797 for event_id in new_latest_event_ids:
798 # First search in the list of new events we're adding.
799 for ev, ctx in events_context:
800 if event_id == ev.event_id and ctx.state_group is not None:
801 event_id_to_state_group[event_id] = ctx.state_group
802 break
803 else:
804 # If we couldn't find it, then we'll need to pull
805 # the state from the database
806 missing_event_ids.add(event_id)
807
808 if missing_event_ids:
809 # Now pull out the state groups for any missing events from DB
810 event_to_groups = yield self._get_state_group_for_events(missing_event_ids)
811 event_id_to_state_group.update(event_to_groups)
812
813 # State groups of old_latest_event_ids
814 old_state_groups = set(
815 event_id_to_state_group[evid] for evid in old_latest_event_ids
816 )
817
818 # State groups of new_latest_event_ids
819 new_state_groups = set(
820 event_id_to_state_group[evid] for evid in new_latest_event_ids
821 )
822
823 # If the old and new groups are the same then we don't need to do
824 # anything.
825 if old_state_groups == new_state_groups:
826 return None, None
827
828 if len(new_state_groups) == 1 and len(old_state_groups) == 1:
829 # If we're going from one state group to another, let's check if
830 # we have a delta for that transition. If we do then we can just
831 # return that.
832
833 new_state_group = next(iter(new_state_groups))
834 old_state_group = next(iter(old_state_groups))
835
836 delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
837 if delta_ids is not None:
838 # We have a delta from the existing to new current state,
839 # so let's just return that. If we happen to already have
840 # the current state in memory then let's also return that,
841 # but it doesn't matter if we don't.
842 new_state = state_groups_map.get(new_state_group)
843 return new_state, delta_ids
844
845 # Now that we have calculated new_state_groups we need to get
846 # their state IDs so we can resolve to a single state set.
847 missing_state = new_state_groups - set(state_groups_map)
848 if missing_state:
849 group_to_state = yield self._get_state_for_groups(missing_state)
850 state_groups_map.update(group_to_state)
851
852 if len(new_state_groups) == 1:
853 # If there is only one state group, then we know what the current
854 # state is.
855 return state_groups_map[new_state_groups.pop()], None
856
857 # Ok, we need to defer to the state handler to resolve our state sets.
858
859 state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
860
861 events_map = {ev.event_id: ev for ev, _ in events_context}
862
863 # We need to get the room version, which is in the create event.
864 # Normally that'd be in the database, but it's also possible that we're
865 # currently trying to persist it.
866 room_version = None
867 for ev, _ in events_context:
868 if ev.type == EventTypes.Create and ev.state_key == "":
869 room_version = ev.content.get("room_version", "1")
870 break
871
872 if not room_version:
873 room_version = yield self.get_room_version(room_id)
874
875 logger.debug("calling resolve_state_groups from preserve_events")
876 res = yield self._state_resolution_handler.resolve_state_groups(
877 room_id,
878 room_version,
879 state_groups,
880 events_map,
881 state_res_store=StateResolutionStore(self),
882 )
883
884 return res.state, None
885
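A toy illustration of the return contract documented above, with state maps keyed by (type, state_key) as in this function: when a single old group moves to a single new group and a delta for that transition was cached, the delta alone is returned, and the full state only if it also happens to be in memory:

state_group_deltas = {(1, 2): {("m.room.topic", ""): "$new_topic"}}
state_groups_map = {}  # nothing cached for group 2 in this toy run

old_state_groups, new_state_groups = {1}, {2}
result = None
if len(old_state_groups) == 1 and len(new_state_groups) == 1:
    delta_ids = state_group_deltas.get((1, 2))
    if delta_ids is not None:
        result = (state_groups_map.get(2), delta_ids)

assert result == (None, {("m.room.topic", ""): "$new_topic"})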
886 @defer.inlineCallbacks
887 def _calculate_state_delta(self, room_id, current_state):
888 """Calculate the new state deltas for a room.
889
890 Assumes that we are only persisting events for one room at a time.
891
892 Returns:
893 tuple[list, dict] (to_delete, to_insert): where to_delete are the
894 type/state_keys to remove from current_state_events and `to_insert`
895 are the updates to current_state_events.
896 """
897 existing_state = yield self.get_current_state_ids(room_id)
898
899 to_delete = [key for key in existing_state if key not in current_state]
900
901 to_insert = {
902 key: ev_id
903 for key, ev_id in iteritems(current_state)
904 if ev_id != existing_state.get(key)
905 }
906
907 return to_delete, to_insert
908350
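Worked through on toy data, the delta computation above behaves as follows (keys are (event type, state_key) pairs, values are event IDs):

existing_state = {("m.room.topic", ""): "$old", ("m.room.name", ""): "$name"}
current_state = {("m.room.name", ""): "$name2", ("m.room.avatar", ""): "$avatar"}

to_delete = [key for key in existing_state if key not in current_state]
to_insert = {
    key: ev_id
    for key, ev_id in current_state.items()
    if ev_id != existing_state.get(key)
}

assert to_delete == [("m.room.topic", "")]  # topic vanished entirely
assert to_insert == {
    ("m.room.name", ""): "$name2",          # changed
    ("m.room.avatar", ""): "$avatar",       # newly added
}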
909351 @log_function
910352 def _persist_events_txn(
1489931
1490932 self._handle_event_relations(txn, event)
1491933
934 # Store the labels for this event.
935 labels = event.content.get(EventContentFields.LABELS)
936 if labels:
937 self.insert_labels_for_event_txn(
938 txn, event.event_id, labels, event.room_id, event.depth
939 )
940
1492941 # Insert into the room_memberships table.
1493942 self._store_room_members_txn(
1494943 txn,
16821131 AND stream_ordering > ?
16831132 """
16841133 txn.execute(sql, (self.stream_ordering_day_ago,))
1685 count, = txn.fetchone()
1134 (count,) = txn.fetchone()
16861135 return count
16871136
16881137 ret = yield self.runInteraction("count_messages", _count_messages)
17031152 """
17041153
17051154 txn.execute(sql, (like_clause, self.stream_ordering_day_ago))
1706 count, = txn.fetchone()
1155 (count,) = txn.fetchone()
17071156 return count
17081157
17091158 ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)
17181167 AND stream_ordering > ?
17191168 """
17201169 txn.execute(sql, (self.stream_ordering_day_ago,))
1721 count, = txn.fetchone()
1170 (count,) = txn.fetchone()
17221171 return count
17231172
17241173 ret = yield self.runInteraction("count_daily_active_rooms", _count)
19251374 if True, we will delete local events as well as remote ones
19261375 (instead of just marking them as outliers and deleting their
19271376 state groups).
1377
1378 Returns:
1379 Deferred[set[int]]: The set of state groups that are referenced by
1380 deleted events.
19281381 """
19291382
19301383 return self.runInteraction(
20611514 [(room_id, event_id) for event_id, in new_backwards_extrems],
20621515 )
20631516
2064 logger.info("[purge] finding redundant state groups")
1517 logger.info("[purge] finding state groups referenced by deleted events")
20651518
20661519 # Get all state groups that are referenced by events that are to be
2067 # deleted. We then go and check if they are referenced by other events
2068 # or state groups, and if not we delete them.
1520 # deleted.
20691521 txn.execute(
20701522 """
20711523 SELECT DISTINCT state_group FROM events_to_purge
20761528 referenced_state_groups = set(sg for sg, in txn)
20771529 logger.info(
20781530 "[purge] found %i referenced state groups", len(referenced_state_groups)
2079 )
2080
2081 logger.info("[purge] finding state groups that can be deleted")
2082
2083 _ = self._find_unreferenced_groups_during_purge(txn, referenced_state_groups)
2084 state_groups_to_delete, remaining_state_groups = _
2085
2086 logger.info(
2087 "[purge] found %i state groups to delete", len(state_groups_to_delete)
2088 )
2089
2090 logger.info(
2091 "[purge] de-delta-ing %i remaining state groups",
2092 len(remaining_state_groups),
2093 )
2094
2095 # Now we turn the state groups that reference to-be-deleted state
2096 # groups to non delta versions.
2097 for sg in remaining_state_groups:
2098 logger.info("[purge] de-delta-ing remaining state group %s", sg)
2099 curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
2100 curr_state = curr_state[sg]
2101
2102 self._simple_delete_txn(
2103 txn, table="state_groups_state", keyvalues={"state_group": sg}
2104 )
2105
2106 self._simple_delete_txn(
2107 txn, table="state_group_edges", keyvalues={"state_group": sg}
2108 )
2109
2110 self._simple_insert_many_txn(
2111 txn,
2112 table="state_groups_state",
2113 values=[
2114 {
2115 "state_group": sg,
2116 "room_id": room_id,
2117 "type": key[0],
2118 "state_key": key[1],
2119 "event_id": state_id,
2120 }
2121 for key, state_id in iteritems(curr_state)
2122 ],
2123 )
2124
2125 logger.info("[purge] removing redundant state groups")
2126 txn.executemany(
2127 "DELETE FROM state_groups_state WHERE state_group = ?",
2128 ((sg,) for sg in state_groups_to_delete),
2129 )
2130 txn.executemany(
2131 "DELETE FROM state_groups WHERE id = ?",
2132 ((sg,) for sg in state_groups_to_delete),
21331531 )
21341532
21351533 logger.info("[purge] removing events from event_to_state_groups")
22031601 """,
22041602 (room_id,),
22051603 )
2206 min_depth, = txn.fetchone()
1604 (min_depth,) = txn.fetchone()
22071605
22081606 logger.info("[purge] updating room_depth to %d", min_depth)
22091607
22181616
22191617 logger.info("[purge] done")
22201618
2221 def _find_unreferenced_groups_during_purge(self, txn, state_groups):
2222 """Used when purging history to figure out which state groups can be
2223 deleted and which need to be de-delta'ed (due to one of their prev groups
2224 being scheduled for deletion).
2225
2226 Args:
2227 txn
2228 state_groups (set[int]): Set of state groups referenced by events
2229 that are going to be deleted.
2230
2231 Returns:
2232 tuple[set[int], set[int]]: The set of state groups that can be
2233 deleted and the set of state groups that need to be de-delta'ed
2234 """
2235 # Graph of state group -> previous group
2236 graph = {}
2237
2238 # Set of state groups that we have found to be referenced by events
2239 referenced_groups = set()
2240
2241 # Set of state groups we've already seen
2242 state_groups_seen = set(state_groups)
2243
2244 # Set of state groups to handle next.
2245 next_to_search = set(state_groups)
2246 while next_to_search:
2247 # We bound the number of groups we look up at once, to stop the
2248 # SQL query from getting too big
2249 if len(next_to_search) < 100:
2250 current_search = next_to_search
2251 next_to_search = set()
2252 else:
2253 current_search = set(itertools.islice(next_to_search, 100))
2254 next_to_search -= current_search
2255
2256 # Check if state groups are referenced
2257 sql = """
2258 SELECT DISTINCT state_group FROM event_to_state_groups
2259 LEFT JOIN events_to_purge AS ep USING (event_id)
2260 WHERE ep.event_id IS NULL AND
2261 """
2262 clause, args = make_in_list_sql_clause(
2263 txn.database_engine, "state_group", current_search
2264 )
2265 txn.execute(sql + clause, list(args))
2266
2267 referenced = set(sg for sg, in txn)
2268 referenced_groups |= referenced
2269
2270 # We don't continue iterating up the state group graphs for state
2271 # groups that are referenced.
2272 current_search -= referenced
2273
2274 rows = self._simple_select_many_txn(
2275 txn,
2276 table="state_group_edges",
2277 column="prev_state_group",
2278 iterable=current_search,
2279 keyvalues={},
2280 retcols=("prev_state_group", "state_group"),
2281 )
2282
2283 prevs = set(row["state_group"] for row in rows)
2284 # We don't bother re-handling groups we've already seen
2285 prevs -= state_groups_seen
2286 next_to_search |= prevs
2287 state_groups_seen |= prevs
2288
2289 for row in rows:
2290 # Note: Each state group can have at most one prev group
2291 graph[row["state_group"]] = row["prev_state_group"]
2292
2293 to_delete = state_groups_seen - referenced_groups
2294
2295 to_dedelta = set()
2296 for sg in referenced_groups:
2297 prev_sg = graph.get(sg)
2298 if prev_sg and prev_sg in to_delete:
2299 to_dedelta.add(sg)
2300
2301 return to_delete, to_dedelta
1619 return referenced_state_groups
23021620
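The decision the removed code was making (and which now happens behind get_referenced_state_groups and purge_unreferenced_state_groups) reduces to a small amount of set logic. A sketch on a toy three-group chain, where each group may be stored as a delta against one prev group:

edges = {2: 1, 3: 2}      # state_group -> prev_state_group
referenced = {3}          # groups still referenced by surviving events
candidates = {1, 2, 3}    # groups reachable from the purged events

to_delete = candidates - referenced
# A surviving group whose prev group is being deleted must be "de-delta-ed",
# i.e. rewritten as a full snapshot instead of a delta on its prev:
to_dedelta = {sg for sg in referenced if edges.get(sg) in to_delete}

assert to_delete == {1, 2}
assert to_dedelta == {3}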
23031621 def purge_room(self, room_id):
23041622 """Deletes all record of a room
23051623
23061624 Args:
2307 room_id (str):
1625 room_id (str)
1626
1627 Returns:
1628 Deferred[List[int]]: The list of state groups to delete.
23081629 """
23091630
23101631 return self.runInteraction("purge_room", self._purge_room_txn, room_id)
23111632
23121633 def _purge_room_txn(self, txn, room_id):
2313 # first we have to delete the state groups states
2314 logger.info("[purge] removing %s from state_groups_state", room_id)
2315
1634 # First we fetch all the state groups that should be deleted, before
1635 # we delete that information.
23161636 txn.execute(
23171637 """
2318 DELETE FROM state_groups_state WHERE state_group IN (
2319 SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
2320 WHERE events.room_id=?
2321 )
1638 SELECT DISTINCT state_group FROM events
1639 INNER JOIN event_to_state_groups USING(event_id)
1640 WHERE events.room_id = ?
23221641 """,
23231642 (room_id,),
23241643 )
23251644
2326 # ... and the state group edges
2327 logger.info("[purge] removing %s from state_group_edges", room_id)
2328
2329 txn.execute(
2330 """
2331 DELETE FROM state_group_edges WHERE state_group IN (
2332 SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
2333 WHERE events.room_id=?
2334 )
2335 """,
2336 (room_id,),
2337 )
2338
2339 # ... and the state groups
2340 logger.info("[purge] removing %s from state_groups", room_id)
2341
2342 txn.execute(
2343 """
2344 DELETE FROM state_groups WHERE id IN (
2345 SELECT state_group FROM events JOIN event_to_state_groups USING(event_id)
2346 WHERE events.room_id=?
2347 )
2348 """,
2349 (room_id,),
2350 )
2351
2352 # and then tables which lack an index on room_id but have one on event_id
1645 state_groups = [row[0] for row in txn]
1646
1647 # Now we delete tables which lack an index on room_id but have one on event_id
23531648 for table in (
23541649 "event_auth",
23551650 "event_edges",
23951690 "room_stats_earliest_token",
23961691 "rooms",
23971692 "stream_ordering_to_exterm",
2398 "topics",
23991693 "users_in_public_rooms",
24001694 "users_who_share_private_rooms",
24011695 # no useful index, but let's clear them anyway
24381732
24391733 logger.info("[purge] done")
24401734
1735 return state_groups
1736
1737 def purge_unreferenced_state_groups(
1738 self, room_id: str, state_groups_to_delete
1739 ) -> defer.Deferred:
1740 """Deletes no longer referenced state groups and de-deltas any state
1741 groups that reference them.
1742
1743 Args:
1744 room_id: The room the state groups belong to (must all be in the
1745 same room).
1746 state_groups_to_delete (Collection[int]): Set of all state groups
1747 to delete.
1748 """
1749
1750 return self.runInteraction(
1751 "purge_unreferenced_state_groups",
1752 self._purge_unreferenced_state_groups,
1753 room_id,
1754 state_groups_to_delete,
1755 )
1756
1757 def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
1758 logger.info(
1759 "[purge] found %i state groups to delete", len(state_groups_to_delete)
1760 )
1761
1762 rows = self._simple_select_many_txn(
1763 txn,
1764 table="state_group_edges",
1765 column="prev_state_group",
1766 iterable=state_groups_to_delete,
1767 keyvalues={},
1768 retcols=("state_group",),
1769 )
1770
1771 remaining_state_groups = set(
1772 row["state_group"]
1773 for row in rows
1774 if row["state_group"] not in state_groups_to_delete
1775 )
1776
1777 logger.info(
1778 "[purge] de-delta-ing %i remaining state groups",
1779 len(remaining_state_groups),
1780 )
1781
1782 # Now we turn the state groups that reference to-be-deleted state
1783 # groups to non delta versions.
1784 for sg in remaining_state_groups:
1785 logger.info("[purge] de-delta-ing remaining state group %s", sg)
1786 curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
1787 curr_state = curr_state[sg]
1788
1789 self._simple_delete_txn(
1790 txn, table="state_groups_state", keyvalues={"state_group": sg}
1791 )
1792
1793 self._simple_delete_txn(
1794 txn, table="state_group_edges", keyvalues={"state_group": sg}
1795 )
1796
1797 self._simple_insert_many_txn(
1798 txn,
1799 table="state_groups_state",
1800 values=[
1801 {
1802 "state_group": sg,
1803 "room_id": room_id,
1804 "type": key[0],
1805 "state_key": key[1],
1806 "event_id": state_id,
1807 }
1808 for key, state_id in iteritems(curr_state)
1809 ],
1810 )
1811
1812 logger.info("[purge] removing redundant state groups")
1813 txn.executemany(
1814 "DELETE FROM state_groups_state WHERE state_group = ?",
1815 ((sg,) for sg in state_groups_to_delete),
1816 )
1817 txn.executemany(
1818 "DELETE FROM state_groups WHERE id = ?",
1819 ((sg,) for sg in state_groups_to_delete),
1820 )
1821
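De-delta-ing, as performed above, means materialising a group's full state so it no longer depends on a prev group that is about to be deleted. A minimal sketch over toy delta-encoded groups (the real code resolves state via _get_state_groups_from_groups_txn; the shapes here are illustrative only):

full_state = {1: {("m.room.create", ""): "$create"}}  # group 1: full snapshot
deltas = {2: {("m.room.topic", ""): "$topic"}}        # group 2: delta on group 1
prev = {2: 1}

def resolve(sg):
    # Walk up to the root snapshot, then apply each delta on the way back down.
    chain = []
    while sg is not None:
        chain.append(sg)
        sg = prev.get(sg)
    state = {}
    for g in reversed(chain):
        state.update(full_state.get(g, {}))
        state.update(deltas.get(g, {}))
    return state

# De-delta group 2 so that group 1 can be deleted safely:
full_state[2] = resolve(2)
deltas.pop(2)
prev.pop(2)
assert full_state[2] == {
    ("m.room.create", ""): "$create",
    ("m.room.topic", ""): "$topic",
}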
24411822 @defer.inlineCallbacks
2442 def is_event_after(self, event_id1, event_id2):
1823 def get_previous_state_groups(self, state_groups):
1824 """Fetch the previous groups of the given state groups.
1825
1826 Args:
1827 state_groups (Iterable[int])
1828
1829 Returns:
1830 Deferred[dict[int, int]]: mapping from state group to previous
1831 state group.
1832 """
1833
1834 rows = yield self._simple_select_many_batch(
1835 table="state_group_edges",
1836 column="prev_state_group",
1837 iterable=state_groups,
1838 keyvalues={},
1839 retcols=("prev_state_group", "state_group"),
1840 desc="get_previous_state_groups",
1841 )
1842
1843 return {row["state_group"]: row["prev_state_group"] for row in rows}
1844
1845 def purge_room_state(self, room_id, state_groups_to_delete):
1846 """Deletes all record of a room from state tables
1847
1848 Args:
1849 room_id (str):
1850 state_groups_to_delete (list[int]): State groups to delete
1851 """
1852
1853 return self.runInteraction(
1854 "purge_room_state",
1855 self._purge_room_state_txn,
1856 room_id,
1857 state_groups_to_delete,
1858 )
1859
1860 def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
1861 # first we have to delete the state groups states
1862 logger.info("[purge] removing %s from state_groups_state", room_id)
1863
1864 self._simple_delete_many_txn(
1865 txn,
1866 table="state_groups_state",
1867 column="state_group",
1868 iterable=state_groups_to_delete,
1869 keyvalues={},
1870 )
1871
1872 # ... and the state group edges
1873 logger.info("[purge] removing %s from state_group_edges", room_id)
1874
1875 self._simple_delete_many_txn(
1876 txn,
1877 table="state_group_edges",
1878 column="state_group",
1879 iterable=state_groups_to_delete,
1880 keyvalues={},
1881 )
1882
1883 # ... and the state groups
1884 logger.info("[purge] removing %s from state_groups", room_id)
1885
1886 self._simple_delete_many_txn(
1887 txn,
1888 table="state_groups",
1889 column="id",
1890 iterable=state_groups_to_delete,
1891 keyvalues={},
1892 )
1893
1894 async def is_event_after(self, event_id1, event_id2):
24431895 """Returns True if event_id1 is after event_id2 in the stream
24441896 """
2445 to_1, so_1 = yield self._get_event_ordering(event_id1)
2446 to_2, so_2 = yield self._get_event_ordering(event_id2)
1897 to_1, so_1 = await self._get_event_ordering(event_id1)
1898 to_2, so_2 = await self._get_event_ordering(event_id2)
24471899 return (to_1, so_1) > (to_2, so_2)
24481900
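The comparison relies on Python's lexicographic ordering of (topological_ordering, stream_ordering) tuples, so the stream position only breaks ties within the same topological depth:

assert (5, 100) > (5, 99)  # same depth: stream ordering decides
assert (6, 1) > (5, 999)   # different depth: topological ordering wins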
24491901 @cachedInlineCallbacks(max_entries=5000)
24761928 get_all_updated_current_state_deltas_txn,
24771929 )
24781930
1931 def insert_labels_for_event_txn(
1932 self, txn, event_id, labels, room_id, topological_ordering
1933 ):
1934 """Store the mapping between an event's ID and its labels, with one row per
1935 (event_id, label) tuple.
1936
1937 Args:
1938 txn (LoggingTransaction): The transaction to execute.
1939 event_id (str): The event's ID.
1940 labels (list[str]): A list of text labels.
1941 room_id (str): The ID of the room the event was sent to.
1942 topological_ordering (int): The position of the event in the room's topology.
1943 """
1944 return self._simple_insert_many_txn(
1945 txn=txn,
1946 table="event_labels",
1947 values=[
1948 {
1949 "event_id": event_id,
1950 "label": label,
1951 "room_id": room_id,
1952 "topological_ordering": topological_ordering,
1953 }
1954 for label in labels
1955 ],
1956 )
1957
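The helper above fans a single event out into one row per label. Its expansion is equivalent to this toy snippet (sample values hypothetical):

event_id, room_id, depth = "$ev1:test", "!room:test", 42
labels = ["#fun", "#work"]

rows = [
    {
        "event_id": event_id,
        "label": label,
        "room_id": room_id,
        "topological_ordering": depth,
    }
    for label in labels
]
assert len(rows) == 2 and {r["label"] for r in rows} == {"#fun", "#work"}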
24791958
24801959 AllNewEventsResult = namedtuple(
24811960 "AllNewEventsResult",
2020
2121 from twisted.internet import defer
2222
23 from synapse.api.constants import EventContentFields
2324 from synapse.storage._base import make_in_list_sql_clause
2425 from synapse.storage.background_updates import BackgroundUpdateStore
2526
8283
8384 self.register_background_update_handler(
8485 "event_fix_redactions_bytes", self._event_fix_redactions_bytes
86 )
87
88 self.register_background_update_handler(
89 "event_store_labels", self._event_store_labels
8590 )
8691
8792 @defer.inlineCallbacks
437442 if not rows:
438443 return 0
439444
440 upper_event_id, = rows[-1]
445 (upper_event_id,) = rows[-1]
441446
442447 # Update the redactions with the received_ts.
443448 #
502507 yield self._end_background_update("event_fix_redactions_bytes")
503508
504509 return 1
510
511 @defer.inlineCallbacks
512 def _event_store_labels(self, progress, batch_size):
513 """Background update handler which will store labels for existing events."""
514 last_event_id = progress.get("last_event_id", "")
515
516 def _event_store_labels_txn(txn):
517 txn.execute(
518 """
519 SELECT event_id, json FROM event_json
520 LEFT JOIN event_labels USING (event_id)
521 WHERE event_id > ? AND label IS NULL
522 ORDER BY event_id LIMIT ?
523 """,
524 (last_event_id, batch_size),
525 )
526
527 results = list(txn)
528
529 nbrows = 0
530 last_row_event_id = ""
531 for (event_id, event_json_raw) in results:
532 try:
533 event_json = json.loads(event_json_raw)
534
535 self._simple_insert_many_txn(
536 txn=txn,
537 table="event_labels",
538 values=[
539 {
540 "event_id": event_id,
541 "label": label,
542 "room_id": event_json["room_id"],
543 "topological_ordering": event_json["depth"],
544 }
545 for label in event_json["content"].get(
546 EventContentFields.LABELS, []
547 )
548 if isinstance(label, str)
549 ],
550 )
551 except Exception as e:
552 logger.warning(
553 "Unable to load event %s (no labels will be imported): %s",
554 event_id,
555 e,
556 )
557
558 nbrows += 1
559 last_row_event_id = event_id
560
561 self._background_update_progress_txn(
562 txn, "event_store_labels", {"last_event_id": last_row_event_id}
563 )
564
565 return nbrows
566
567 num_rows = yield self.runInteraction(
568 desc="event_store_labels", func=_event_store_labels_txn
569 )
570
571 if not num_rows:
572 yield self._end_background_update("event_store_labels")
573
574 return num_rows
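_event_store_labels follows the usual cursor-style background-update shape: handle at most batch_size rows above the last-seen key, record that key as progress, and report zero rows handled to signal completion. A schematic, Synapse-free sketch of that loop (names hypothetical):

def run_batch(rows, progress, batch_size):
    """One iteration over rows sorted by key; returns rows handled (0 = done)."""
    last = progress.get("last_event_id", "")
    batch = [row for row in rows if row[0] > last][:batch_size]
    for key, value in batch:
        pass  # the per-row work (e.g. inserting label rows) goes here
    if batch:
        progress["last_event_id"] = batch[-1][0]  # persist the cursor
    return len(batch)

rows = sorted({"$a": 1, "$b": 2, "$c": 3}.items())
progress, handled = {}, []
while True:
    n = run_batch(rows, progress, batch_size=2)
    if n == 0:
        break  # the real handler calls _end_background_update here
    handled.append(n)
assert handled == [2, 1]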
248248 WHERE group_id = ? AND category_id = ?
249249 """
250250 txn.execute(sql, (group_id, category_id))
251 order, = txn.fetchone()
251 (order,) = txn.fetchone()
252252
253253 if existing:
254254 to_update = {}
508508 WHERE group_id = ? AND role_id = ?
509509 """
510510 txn.execute(sql, (group_id, role_id))
511 order, = txn.fetchone()
511 (order,) = txn.fetchone()
512512
513513 if existing:
514514 to_update = {}
550550 table="group_summary_users",
551551 keyvalues={"group_id": group_id, "role_id": role_id, "user_id": user_id},
552552 desc="remove_user_from_summary",
553 )
554
555 def get_local_groups_for_room(self, room_id):
556 """Get all of the local group that contain a given room
557 Args:
558 room_id (str): The ID of a room
559 Returns:
560 Deferred[list[str]]: A twisted.Deferred containing a list of group ids
561 containing this room
562 """
563 return self._simple_select_onecol(
564 table="group_rooms",
565 keyvalues={"room_id": room_id},
566 retcol="group_id",
567 desc="get_local_groups_for_room",
553568 )
554569
555570 def get_users_for_summary_by_role(self, group_id, include_private=False):
170170 sql = "SELECT COALESCE(count(*), 0) FROM monthly_active_users"
171171
172172 txn.execute(sql)
173 count, = txn.fetchone()
173 (count,) = txn.fetchone()
174174 return count
175175
176176 return self.runInteraction("count_users", _count_users)
142142 " WHERE user_id = ? AND ? < stream_id"
143143 )
144144 txn.execute(sql, (user_id, last_id))
145 count, = txn.fetchone()
145 (count,) = txn.fetchone()
146146 return bool(count)
147147
148148 return self.runInteraction(
4343
4444 r["data"] = json.loads(dataJson)
4545 except Exception as e:
46 logger.warn(
46 logger.warning(
4747 "Invalid JSON in data for pusher %d: %s, %s",
4848 r["id"],
4949 dataJson,
458458 WHERE appservice_id IS NULL
459459 """
460460 )
461 count, = txn.fetchone()
461 (count,) = txn.fetchone()
462462 return count
463463
464464 ret = yield self.runInteraction("count_users", _count_users)
487487 we can. Unfortunately, it's possible some of them are already taken by
488488 existing users, and there may be gaps in the already taken range. This
489489 function returns the start of the first allocatable gap. This is to
490 avoid the case of ID 10000000 being pre-allocated, so us wasting the
491 first (and shortest) many generated user IDs.
490 avoid the case of ID 1000 being pre-allocated and starting at 1001 while
491 0-999 are available.
492492 """
493493
494494 def _find_next_generated_user_id(txn):
495 # We bound between '@1' and '@a' to avoid pulling the entire table
495 # We bound between '@0' and '@a' to avoid pulling the entire table
496496 # out.
497 txn.execute("SELECT name FROM users WHERE '@1' <= name AND name < '@a'")
497 txn.execute("SELECT name FROM users WHERE '@0' <= name AND name < '@a'")
498498
499499 regex = re.compile(r"^@(\d+):")
500500
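Reduced to plain Python, the gap-hunting this function performs over the scanned rows looks like the sketch below; the SQL bound merely keeps the scan to names that can match the regex:

import re

def first_free_generated_id(names):
    regex = re.compile(r"^@(\d+):")
    taken = {int(m.group(1)) for m in map(regex.match, names) if m}
    i = 0
    while i in taken:  # smallest non-negative integer not already taken
        i += 1
    return i

assert first_free_generated_id(["@0:hs", "@1:hs", "@3:hs", "@alice:hs"]) == 2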
719719 # See bulk_get_push_rules_for_room for how we work around this.
720720 assert state_group is not None
721721
722 cache = self._get_joined_hosts_cache(room_id)
722 cache = yield self._get_joined_hosts_cache(room_id)
723723 joined_hosts = yield cache.get_destinations(state_entry)
724724
725725 return joined_hosts
926926 if not row or not row[0]:
927927 return processed, True
928928
929 next_room, = row
929 (next_room,) = row
930930
931931 sql = """
932932 UPDATE current_state_events
0 /* Copyright 2019 The Matrix.org Foundation C.I.C.
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 /* delete room keys that belong to deleted room key version, or to room key
16 * versions that don't exist (anymore)
17 */
18 DELETE FROM e2e_room_keys
19 WHERE version NOT IN (
20 SELECT version
21 FROM e2e_room_keys_versions
22 WHERE e2e_room_keys.user_id = e2e_room_keys_versions.user_id
23 AND e2e_room_keys_versions.deleted = 0
24 );
0 /* Copyright 2019 The Matrix.org Foundation C.I.C.
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 -- room_id and topological_ordering are denormalised from the events table in order to
16 -- make the index work.
17 CREATE TABLE IF NOT EXISTS event_labels (
18 event_id TEXT,
19 label TEXT,
20 room_id TEXT NOT NULL,
21 topological_ordering BIGINT NOT NULL,
22 PRIMARY KEY(event_id, label)
23 );
24
25
26 -- This index allows an event pagination query filtering on a particular label to hit the
27 -- event_labels table first, which is much quicker than scanning the events table and then
28 -- filtering by label, provided the label is rare relative to the size of the room.
29 CREATE INDEX event_labels_room_id_label_idx ON event_labels(room_id, label, topological_ordering);
0 /* Copyright 2019 The Matrix.org Foundation C.I.C.
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 INSERT INTO background_updates (update_name, progress_json) VALUES
16 ('event_store_labels', '{}');
0 /* Copyright 2019 The Matrix.org Foundation C.I.C.
1 *
2 * Licensed under the Apache License, Version 2.0 (the "License");
3 * you may not use this file except in compliance with the License.
4 * You may obtain a copy of the License at
5 *
6 * http://www.apache.org/licenses/LICENSE-2.0
7 *
8 * Unless required by applicable law or agreed to in writing, software
9 * distributed under the License is distributed on an "AS IS" BASIS,
10 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 * See the License for the specific language governing permissions and
12 * limitations under the License.
13 */
14
15 /* Change the hidden column from a default value of FALSE to a default value of
16 * 0, because sqlite3 prior to 3.23.0 caused the hidden column to contain the
17 * string 'FALSE', which is truthy.
18 *
19 * Since sqlite doesn't allow us to just change the default value, we have to
20 * recreate the table, copy the data, fix the rows that have incorrect data, and
21 * replace the old table with the new table.
22 */
23
24 CREATE TABLE IF NOT EXISTS devices2 (
25 user_id TEXT NOT NULL,
26 device_id TEXT NOT NULL,
27 display_name TEXT,
28 last_seen BIGINT,
29 ip TEXT,
30 user_agent TEXT,
31 hidden BOOLEAN DEFAULT 0,
32 CONSTRAINT device_uniqueness UNIQUE (user_id, device_id)
33 );
34
35 INSERT INTO devices2 SELECT * FROM devices;
36
37 UPDATE devices2 SET hidden = 0 WHERE hidden = 'FALSE';
38
39 DROP TABLE devices;
40
41 ALTER TABLE devices2 RENAME TO devices;
195195 " ON event_search USING GIN (vector)"
196196 )
197197 except psycopg2.ProgrammingError as e:
198 logger.warn(
198 logger.warning(
199199 "Ignoring error %r when trying to switch from GIST to GIN", e
200200 )
201201
671671 )
672672 )
673673 txn.execute(query, (value, search_query))
674 headline, = txn.fetchall()[0]
674 (headline,) = txn.fetchall()[0]
675675
676676 # Now we need to pick the possible highlights out of the headline
677677 # result.
1414
1515 import logging
1616 from collections import namedtuple
17 from typing import Iterable, Tuple
1718
1819 from six import iteritems, itervalues
1920 from six.moves import range
2223
2324 from synapse.api.constants import EventTypes
2425 from synapse.api.errors import NotFoundError
26 from synapse.events import EventBase
27 from synapse.events.snapshot import EventContext
2528 from synapse.storage._base import SQLBaseStore
2629 from synapse.storage.background_updates import BackgroundUpdateStore
2730 from synapse.storage.data_stores.main.events_worker import EventsWorkerStore
281284 room_id (str)
282285
283286 Returns:
284 Deferred[unicode|None]: predecessor room id
287 Deferred[dict|None]: A dictionary containing the structure of the predecessor
288 field from the room's create event. The structure is determined by other servers,
289 but it is expected to be:
290 * room_id (str): The room ID of the predecessor room
291 * event_id (str): The ID of the tombstone event in the predecessor room
285292
286293 Raises:
287294 NotFoundError if the room is unknown
721728 member_filter, non_member_filter = state_filter.get_member_split()
722729
723730 # Now we look them up in the member and non-member caches
724 non_member_state, incomplete_groups_nm, = (
725 yield self._get_state_for_groups_using_cache(
726 groups, self._state_group_cache, state_filter=non_member_filter
727 )
728 )
729
730 member_state, incomplete_groups_m, = (
731 yield self._get_state_for_groups_using_cache(
732 groups, self._state_group_members_cache, state_filter=member_filter
733 )
731 (
732 non_member_state,
733 incomplete_groups_nm,
734 ) = yield self._get_state_for_groups_using_cache(
735 groups, self._state_group_cache, state_filter=non_member_filter
736 )
737
738 (
739 member_state,
740 incomplete_groups_m,
741 ) = yield self._get_state_for_groups_using_cache(
742 groups, self._state_group_members_cache, state_filter=member_filter
734743 )
735744
736745 state = dict(non_member_state)
985994
986995 return self.runInteraction("store_state_group", _store_state_group_txn)
987996
997 @defer.inlineCallbacks
998 def get_referenced_state_groups(self, state_groups):
999 """Check if the state groups are referenced by events.
1000
1001 Args:
1002 state_groups (Iterable[int])
1003
1004 Returns:
1005 Deferred[set[int]]: The subset of state groups that are
1006 referenced.
1007 """
1008
1009 rows = yield self._simple_select_many_batch(
1010 table="event_to_state_groups",
1011 column="state_group",
1012 iterable=state_groups,
1013 keyvalues={},
1014 retcols=("DISTINCT state_group",),
1015 desc="get_referenced_state_groups",
1016 )
1017
1018 return set(row["state_group"] for row in rows)
1019
9881020
9891021 class StateBackgroundUpdateStore(
9901022 StateGroupBackgroundUpdateStore, BackgroundUpdateStore
10721104 " WHERE id < ? AND room_id = ?",
10731105 (state_group, room_id),
10741106 )
1075 prev_group, = txn.fetchone()
1107 (prev_group,) = txn.fetchone()
10761108 new_last_state_group = state_group
10771109
10781110 if prev_group:
12141246 def __init__(self, db_conn, hs):
12151247 super(StateStore, self).__init__(db_conn, hs)
12161248
1217 def _store_event_state_mappings_txn(self, txn, events_and_contexts):
1249 def _store_event_state_mappings_txn(
1250 self, txn, events_and_contexts: Iterable[Tuple[EventBase, EventContext]]
1251 ):
12181252 state_groups = {}
12191253 for event, context in events_and_contexts:
12201254 if event.internal_metadata.is_outlier():
12231257 # if the event was rejected, just give it the same state as its
12241258 # predecessor.
12251259 if context.rejected:
1226 state_groups[event.event_id] = context.prev_group
1260 state_groups[event.event_id] = context.state_group_before_event
12271261 continue
12281262
12291263 state_groups[event.event_id] = context.state_group
331331 def _bulk_update_stats_delta_txn(txn):
332332 for stats_type, stats_updates in updates.items():
333333 for stats_id, fields in stats_updates.items():
334 logger.info(
334 logger.debug(
335335 "Updating %s stats for %s: %s", stats_type, stats_id, fields
336336 )
337337 self._update_stats_delta_txn(
772772 (room_id,),
773773 )
774774
775 current_state_events_count, = txn.fetchone()
775 (current_state_events_count,) = txn.fetchone()
776776
777777 users_in_room = self.get_users_in_room_txn(txn, room_id)
778778
862862 """,
863863 (user_id,),
864864 )
865 count, = txn.fetchone()
865 (count,) = txn.fetchone()
866866 return count, pos
867867
868868 joined_rooms, pos = yield self.runInteraction(
227227 if event_filter.contains_url:
228228 clauses.append("contains_url = ?")
229229 args.append(event_filter.contains_url)
230
231 # We're only applying the "labels" filter on the database query, because applying the
232 # "not_labels" filter via a SQL query is non-trivial. Instead, we let
233 # event_filter.check_fields apply it, which is not as efficient but makes the
234 # implementation simpler.
235 if event_filter.labels:
236 clauses.append("(%s)" % " OR ".join("label = ?" for _ in event_filter.labels))
237 args.extend(event_filter.labels)
230238
231239 return " AND ".join(clauses), args
232240
862870
863871 args.append(int(limit))
864872
865 sql = (
866 "SELECT event_id, topological_ordering, stream_ordering"
867 " FROM events"
868 " WHERE outlier = ? AND room_id = ? AND %(bounds)s"
869 " ORDER BY topological_ordering %(order)s,"
870 " stream_ordering %(order)s LIMIT ?"
871 ) % {"bounds": bounds, "order": order}
873 select_keywords = "SELECT"
874 join_clause = ""
875 if event_filter and event_filter.labels:
876 # If we're not filtering on a label, then joining on event_labels will
877 # return as many rows for a single event as the number of labels it has. To
878 # avoid this, only join if we're filtering on at least one label.
879 join_clause = """
880 LEFT JOIN event_labels
881 USING (event_id, room_id, topological_ordering)
882 """
883 if len(event_filter.labels) > 1:
884 # Using DISTINCT in this SELECT query is quite expensive, because it
885 # requires the engine to sort on the entire (not limited) result set,
886 # i.e. the entire events table. We only need to use it when we're
887 # filtering on more than one label, because that's the only scenario
888 # in which we can possibly get the same event ID multiple times in
889 # the results.
890 select_keywords += " DISTINCT"
891
892 sql = """
893 %(select_keywords)s event_id, topological_ordering, stream_ordering
894 FROM events
895 %(join_clause)s
896 WHERE outlier = ? AND room_id = ? AND %(bounds)s
897 ORDER BY topological_ordering %(order)s,
898 stream_ordering %(order)s LIMIT ?
899 """ % {
900 "select_keywords": select_keywords,
901 "join_clause": join_clause,
902 "bounds": bounds,
903 "order": order,
904 }
872905
873906 txn.execute(sql, args)
874907
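Putting the two hunks above together, the label filtering assembles its SQL roughly as follows (simplified to the label-related parts, and using the space before DISTINCT as fixed above):

labels = ["#fun", "#work"]

select_keywords, join_clause = "SELECT", ""
if labels:
    # Join only when filtering, to avoid one row per label per event.
    join_clause = (
        "LEFT JOIN event_labels USING (event_id, room_id, topological_ordering)"
    )
    if len(labels) > 1:
        select_keywords += " DISTINCT"  # collapse events matching several labels

where_labels = "(%s)" % " OR ".join("label = ?" for _ in labels)
sql = "%s event_id FROM events %s WHERE %s" % (
    select_keywords,
    join_clause,
    where_labels,
)

assert sql.startswith("SELECT DISTINCT event_id")
assert where_labels == "(label = ? OR label = ?)"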
0 # -*- coding: utf-8 -*-
1 # Copyright 2014-2016 OpenMarket Ltd
2 # Copyright 2018-2019 New Vector Ltd
3 # Copyright 2019 The Matrix.org Foundation C.I.C.
4 #
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
8 #
9 # http://www.apache.org/licenses/LICENSE-2.0
10 #
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
16
17 import logging
18 from collections import deque, namedtuple
19
20 from six import iteritems
21 from six.moves import range
22
23 from prometheus_client import Counter, Histogram
24
25 from twisted.internet import defer
26
27 from synapse.api.constants import EventTypes
28 from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable
29 from synapse.metrics.background_process_metrics import run_as_background_process
30 from synapse.state import StateResolutionStore
31 from synapse.storage.data_stores import DataStores
32 from synapse.util.async_helpers import ObservableDeferred
33 from synapse.util.metrics import Measure
34
35 logger = logging.getLogger(__name__)
36
37 # The number of times we are recalculating the current state
38 state_delta_counter = Counter("synapse_storage_events_state_delta", "")
39
40 # The number of times we are recalculating state when there is only a
41 # single forward extremity
42 state_delta_single_event_counter = Counter(
43 "synapse_storage_events_state_delta_single_event", ""
44 )
45
46 # The number of times we are recalculating state when we could have reasonably
47 # calculated the delta when we calculated the state for an event we were
48 # persisting.
49 state_delta_reuse_delta_counter = Counter(
50 "synapse_storage_events_state_delta_reuse_delta", ""
51 )
52
53 # The number of forward extremities for each new event.
54 forward_extremities_counter = Histogram(
55 "synapse_storage_events_forward_extremities_persisted",
56 "Number of forward extremities for each new event",
57 buckets=(1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
58 )
59
60 # The number of stale forward extremities for each new event. Stale extremities
61 # are those that were in the previous set of extremities as well as the new.
62 stale_forward_extremities_counter = Histogram(
63 "synapse_storage_events_stale_forward_extremities_persisted",
64 "Number of unchanged forward extremities for each new event",
65 buckets=(0, 1, 2, 3, 5, 7, 10, 15, 20, 50, 100, 200, 500, "+Inf"),
66 )
67
68
69 class _EventPeristenceQueue(object):
70 """Queues up events so that they can be persisted in bulk with only one
71 concurrent transaction per room.
72 """
73
74 _EventPersistQueueItem = namedtuple(
75 "_EventPersistQueueItem", ("events_and_contexts", "backfilled", "deferred")
76 )
77
78 def __init__(self):
79 self._event_persist_queues = {}
80 self._currently_persisting_rooms = set()
81
82 def add_to_queue(self, room_id, events_and_contexts, backfilled):
83 """Add events to the queue, with the given persist_event options.
84
85 NB: due to the normal usage pattern of this method, it does *not*
86 follow the synapse logcontext rules, and leaves the logcontext in
87 place whether or not the returned deferred is ready.
88
89 Args:
90 room_id (str):
91 events_and_contexts (list[(EventBase, EventContext)]):
92 backfilled (bool):
93
94 Returns:
95 defer.Deferred: a deferred which will resolve once the events are
96 persisted. Runs its callbacks *without* a logcontext.
97 """
98 queue = self._event_persist_queues.setdefault(room_id, deque())
99 if queue:
100 # if the last item in the queue has the same `backfilled` setting,
101 # we can just add these new events to that item.
102 end_item = queue[-1]
103 if end_item.backfilled == backfilled:
104 end_item.events_and_contexts.extend(events_and_contexts)
105 return end_item.deferred.observe()
106
107 deferred = ObservableDeferred(defer.Deferred(), consumeErrors=True)
108
109 queue.append(
110 self._EventPersistQueueItem(
111 events_and_contexts=events_and_contexts,
112 backfilled=backfilled,
113 deferred=deferred,
114 )
115 )
116
117 return deferred.observe()
118
119 def handle_queue(self, room_id, per_item_callback):
120 """Attempts to handle the queue for a room if not already being handled.
121
122 The given callback will be invoked for each item in the queue,
123 of type _EventPersistQueueItem. The per_item_callback will continuously
124 be called with new items until the queue becomes empty. The return
125 value of the function will be given to the deferreds waiting on the item;
126 exceptions will be passed to the deferreds as well.
127
128 This function should therefore be called whenever anything is added
129 to the queue.
130
131 If another callback is currently handling the queue then it will not be
132 invoked.
133 """
134
135 if room_id in self._currently_persisting_rooms:
136 return
137
138 self._currently_persisting_rooms.add(room_id)
139
140 @defer.inlineCallbacks
141 def handle_queue_loop():
142 try:
143 queue = self._get_drainining_queue(room_id)
144 for item in queue:
145 try:
146 ret = yield per_item_callback(item)
147 except Exception:
148 with PreserveLoggingContext():
149 item.deferred.errback()
150 else:
151 with PreserveLoggingContext():
152 item.deferred.callback(ret)
153 finally:
154 queue = self._event_persist_queues.pop(room_id, None)
155 if queue:
156 self._event_persist_queues[room_id] = queue
157 self._currently_persisting_rooms.discard(room_id)
158
159 # set handle_queue_loop off in the background
160 run_as_background_process("persist_events", handle_queue_loop)
161
162 def _get_drainining_queue(self, room_id):
163 queue = self._event_persist_queues.setdefault(room_id, deque())
164
165 try:
166 while True:
167 yield queue.popleft()
168 except IndexError:
169 # Queue has been drained.
170 pass
171
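_get_drainining_queue is a draining generator: it yields until popleft raises IndexError, which means items appended to the deque while the loop body runs are still consumed. A standalone demonstration of that property:

from collections import deque

def draining(queue):
    try:
        while True:
            yield queue.popleft()
    except IndexError:
        pass  # queue fully drained

q = deque([1, 2])
seen = []
for item in draining(q):
    seen.append(item)
    if item == 1:
        q.append(3)  # enqueued mid-drain, still picked up
assert seen == [1, 2, 3]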
172
173 class EventsPersistenceStorage(object):
174 """High level interface for handling persisting newly received events.
175
176 Takes care of batching up events by room, and calculating the necessary
177 current state and forward extremity changes.
178 """
179
180 def __init__(self, hs, stores: DataStores):
181 # We ultimately want to split out the state store from the main store,
182 # so we use separate variables here even though they point to the same
183 # store for now.
184 self.main_store = stores.main
185 self.state_store = stores.main
186
187 self._clock = hs.get_clock()
188 self.is_mine_id = hs.is_mine_id
189 self._event_persist_queue = _EventPeristenceQueue()
190 self._state_resolution_handler = hs.get_state_resolution_handler()
191
192 @defer.inlineCallbacks
193 def persist_events(self, events_and_contexts, backfilled=False):
194 """
195 Write events to the database
196 Args:
197 events_and_contexts: list of tuples of (event, context)
198 backfilled (bool): Whether the results are retrieved from federation
199 via backfill or not. Used to determine if they're "new" events
200 which might update the current state etc.
201
202 Returns:
203 Deferred[int]: the stream ordering of the latest persisted event
204 """
205 partitioned = {}
206 for event, ctx in events_and_contexts:
207 partitioned.setdefault(event.room_id, []).append((event, ctx))
208
209 deferreds = []
210 for room_id, evs_ctxs in iteritems(partitioned):
211 d = self._event_persist_queue.add_to_queue(
212 room_id, evs_ctxs, backfilled=backfilled
213 )
214 deferreds.append(d)
215
216 for room_id in partitioned:
217 self._maybe_start_persisting(room_id)
218
219 yield make_deferred_yieldable(
220 defer.gatherResults(deferreds, consumeErrors=True)
221 )
222
223 max_persisted_id = yield self.main_store.get_current_events_token()
224
225 return max_persisted_id
226
227 @defer.inlineCallbacks
228 def persist_event(self, event, context, backfilled=False):
229 """
230
231 Args:
232 event (EventBase):
233 context (EventContext):
234 backfilled (bool):
235
236 Returns:
237 Deferred: resolves to (int, int): the stream ordering of ``event``,
238 and the stream ordering of the latest persisted event
239 """
240 deferred = self._event_persist_queue.add_to_queue(
241 event.room_id, [(event, context)], backfilled=backfilled
242 )
243
244 self._maybe_start_persisting(event.room_id)
245
246 yield make_deferred_yieldable(deferred)
247
248 max_persisted_id = yield self.main_store.get_current_events_token()
249 return (event.internal_metadata.stream_ordering, max_persisted_id)
250
251 def _maybe_start_persisting(self, room_id):
252 @defer.inlineCallbacks
253 def persisting_queue(item):
254 with Measure(self._clock, "persist_events"):
255 yield self._persist_events(
256 item.events_and_contexts, backfilled=item.backfilled
257 )
258
259 self._event_persist_queue.handle_queue(room_id, persisting_queue)
260
261 @defer.inlineCallbacks
262 def _persist_events(self, events_and_contexts, backfilled=False):
263 """Calculates the change to current state and forward extremities, and
264 persists the given events and with those updates.
265
266 Args:
267 events_and_contexts (list[(EventBase, EventContext)]):
268 backfilled (bool):
269 delete_existing (bool):
270
271 Returns:
272 Deferred: resolves when the events have been persisted
273 """
274 if not events_and_contexts:
275 return
276
277 chunks = [
278 events_and_contexts[x : x + 100]
279 for x in range(0, len(events_and_contexts), 100)
280 ]
281
282 for chunk in chunks:
283 # We can't easily parallelize these since different chunks
284 # might contain the same event. :(
285
286 # NB: Assumes that we are only persisting events for one room
287 # at a time.
288
289 # map room_id->list[event_ids] giving the new forward
290 # extremities in each room
291 new_forward_extremeties = {}
292
293 # map room_id->(type,state_key)->event_id tracking the full
294 # state in each room after adding these events.
295 # This is simply used to prefill the get_current_state_ids
296 # cache
297 current_state_for_room = {}
298
299 # map room_id->(to_delete, to_insert) where to_delete is a list
300 # of type/state keys to remove from current state, and to_insert
301 # is a map (type,key)->event_id giving the state delta in each
302 # room
303 state_delta_for_room = {}
304
305 if not backfilled:
306 with Measure(self._clock, "_calculate_state_and_extrem"):
307 # Work out the new "current state" for each room.
308 # We do this by working out what the new extremities are and then
309 # calculating the state from that.
310 events_by_room = {}
311 for event, context in chunk:
312 events_by_room.setdefault(event.room_id, []).append(
313 (event, context)
314 )
315
316 for room_id, ev_ctx_rm in iteritems(events_by_room):
317 latest_event_ids = yield self.main_store.get_latest_event_ids_in_room(
318 room_id
319 )
320 new_latest_event_ids = yield self._calculate_new_extremities(
321 room_id, ev_ctx_rm, latest_event_ids
322 )
323
324 latest_event_ids = set(latest_event_ids)
325 if new_latest_event_ids == latest_event_ids:
326 # No change in extremities, so no change in state
327 continue
328
329 # there should always be at least one forward extremity.
330 # (except during the initial persistence of the send_join
331 # results, in which case there will be no existing
332 # extremities, so we'll `continue` above and skip this bit.)
333 assert new_latest_event_ids, "No forward extremities left!"
334
335 new_forward_extremeties[room_id] = new_latest_event_ids
336
337 len_1 = (
338 len(latest_event_ids) == 1
339 and len(new_latest_event_ids) == 1
340 )
341 if len_1:
342 all_single_prev_not_state = all(
343 len(event.prev_event_ids()) == 1
344 and not event.is_state()
345 for event, ctx in ev_ctx_rm
346 )
347 # Don't bother calculating state if they're just
348 # a long chain of single ancestor non-state events.
349 if all_single_prev_not_state:
350 continue
351
352 state_delta_counter.inc()
353 if len(new_latest_event_ids) == 1:
354 state_delta_single_event_counter.inc()
355
356 # This is a fairly handwavey check to see if we could
357 # have guessed what the delta would have been when
358 # processing one of these events.
359 # What we're interested in is if the latest extremities
360 # were the same when we created the event as they are
361 # now. When this server creates a new event (as opposed
362 # to receiving it over federation) it will use the
363 # forward extremities as the prev_events, so we can
364 # guess this by looking at the prev_events and checking
365 # if they match the current forward extremities.
366 for ev, _ in ev_ctx_rm:
367 prev_event_ids = set(ev.prev_event_ids())
368 if latest_event_ids == prev_event_ids:
369 state_delta_reuse_delta_counter.inc()
370 break
371
372 logger.info("Calculating state delta for room %s", room_id)
373 with Measure(
374 self._clock, "persist_events.get_new_state_after_events"
375 ):
376 res = yield self._get_new_state_after_events(
377 room_id,
378 ev_ctx_rm,
379 latest_event_ids,
380 new_latest_event_ids,
381 )
382 current_state, delta_ids = res
383
384 # If either are not None then there has been a change,
385 # and we need to work out the delta (or use that
386 # given)
387 if delta_ids is not None:
388 # If there is a delta we know that we've
389 # only added or replaced state, never
390 # removed keys entirely.
391 state_delta_for_room[room_id] = ([], delta_ids)
392 elif current_state is not None:
393 with Measure(
394 self._clock, "persist_events.calculate_state_delta"
395 ):
396 delta = yield self._calculate_state_delta(
397 room_id, current_state
398 )
399 state_delta_for_room[room_id] = delta
400
401 # If we have the current_state then lets prefill
402 # the cache with it.
403 if current_state is not None:
404 current_state_for_room[room_id] = current_state
405
406 yield self.main_store._persist_events_and_state_updates(
407 chunk,
408 current_state_for_room=current_state_for_room,
409 state_delta_for_room=state_delta_for_room,
410 new_forward_extremeties=new_forward_extremeties,
411 backfilled=backfilled,
412 )
413
414 @defer.inlineCallbacks
415 def _calculate_new_extremities(self, room_id, event_contexts, latest_event_ids):
416 """Calculates the new forward extremities for a room given events to
417 persist.
418
419 Assumes that we are only persisting events for one room at a time.
420 """
421
422 # we're only interested in new events which aren't outliers and which aren't
423 # being rejected.
424 new_events = [
425 event
426 for event, ctx in event_contexts
427 if not event.internal_metadata.is_outlier()
428 and not ctx.rejected
429 and not event.internal_metadata.is_soft_failed()
430 ]
431
432 latest_event_ids = set(latest_event_ids)
433
434 # start with the existing forward extremities
435 result = set(latest_event_ids)
436
437 # add all the new events to the list
438 result.update(event.event_id for event in new_events)
439
440 # Now remove all events which are prev_events of any of the new events
441 result.difference_update(
442 e_id for event in new_events for e_id in event.prev_event_ids()
443 )
444
445 # Remove any events which are prev_events of any existing events.
446 existing_prevs = yield self.main_store._get_events_which_are_prevs(result)
447 result.difference_update(existing_prevs)
448
449 # Finally handle the case where the new events have soft-failed prev
450 # events. If they do we need to remove them and their prev events,
451 # otherwise we end up with dangling extremities.
452 existing_prevs = yield self.main_store._get_prevs_before_rejected(
453 e_id for event in new_events for e_id in event.prev_event_ids()
454 )
455 result.difference_update(existing_prevs)
456
457 # We only update metrics for events that change forward extremities
458 # (e.g. we ignore backfill/outliers/etc)
459 if result != latest_event_ids:
460 forward_extremities_counter.observe(len(result))
461 stale = latest_event_ids & result
462 stale_forward_extremities_counter.observe(len(stale))
463
464 return result
465
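The extremity update above is pure set arithmetic once the prev-event lookups are out of the way. A minimal standalone sketch, with a plain dict standing in for the store lookups (all names here are illustrative, not Synapse APIs):

def calculate_new_extremities_sketch(latest_event_ids, new_event_ids, prev_events):
    # start with the existing extremities plus every new event...
    result = set(latest_event_ids) | set(new_event_ids)
    # ...then drop anything referenced as a prev_event of a new event
    result.difference_update(
        e_id for ev in new_event_ids for e_id in prev_events.get(ev, ())
    )
    return result

# "B" points at the old extremity "A", so only "B" survives
assert calculate_new_extremities_sketch({"A"}, ["B"], {"B": ["A"]}) == {"B"}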
466 @defer.inlineCallbacks
467 def _get_new_state_after_events(
468 self, room_id, events_context, old_latest_event_ids, new_latest_event_ids
469 ):
470 """Calculate the current state dict after adding some new events to
471 a room
472
473 Args:
474 room_id (str):
475 room to which the events are being added. Used for logging etc
476
477 events_context (list[(EventBase, EventContext)]):
478 events and contexts which are being added to the room
479
480 old_latest_event_ids (iterable[str]):
481 the old forward extremities for the room.
482
483 new_latest_event_ids (iterable[str]):
484 the new forward extremities for the room.
485
486 Returns:
487 Deferred[tuple[dict[(str,str), str]|None, dict[(str,str), str]|None]]:
488 Returns a tuple of two state maps, the first being the full new current
489 state and the second being the delta to the existing current state.
490 If both are None then there has been no change.
491
492 If there has been a change then we only return the delta if it's
493 already been calculated. Conversely if we do know the delta then
494 the new current state is only returned if we've already calculated
495 it.
496 """
497 # map from state_group to ((type, key) -> event_id) state map
498 state_groups_map = {}
499
500 # Map from (prev state group, new state group) -> delta state dict
501 state_group_deltas = {}
502
503 for ev, ctx in events_context:
504 if ctx.state_group is None:
505 # This should only happen for outlier events.
506 if not ev.internal_metadata.is_outlier():
507 raise Exception(
508 "Context for new event %s has no state "
509 "group" % (ev.event_id,)
510 )
511 continue
512
513 if ctx.state_group in state_groups_map:
514 continue
515
516 # We're only interested in pulling out state that has already
517 # been cached in the context. We'll pull stuff out of the DB later
518 # if necessary.
519 current_state_ids = ctx.get_cached_current_state_ids()
520 if current_state_ids is not None:
521 state_groups_map[ctx.state_group] = current_state_ids
522
523 if ctx.prev_group:
524 state_group_deltas[(ctx.prev_group, ctx.state_group)] = ctx.delta_ids
525
526 # We need to map the event_ids to their state groups. First, let's
527 # check if the event is one we're persisting, in which case we can
528 # pull the state group from its context.
529 # Otherwise we need to pull the state group from the database.
530
531 # Set of events we need to fetch groups for. (We know none of the old
532 # extremities are going to be in events_context).
533 missing_event_ids = set(old_latest_event_ids)
534
535 event_id_to_state_group = {}
536 for event_id in new_latest_event_ids:
537 # First search in the list of new events we're adding.
538 for ev, ctx in events_context:
539 if event_id == ev.event_id and ctx.state_group is not None:
540 event_id_to_state_group[event_id] = ctx.state_group
541 break
542 else:
543 # If we couldn't find it, then we'll need to pull
544 # the state from the database
545 missing_event_ids.add(event_id)
546
547 if missing_event_ids:
548 # Now pull out the state groups for any missing events from DB
549 event_to_groups = yield self.main_store._get_state_group_for_events(
550 missing_event_ids
551 )
552 event_id_to_state_group.update(event_to_groups)
553
554 # State groups of old_latest_event_ids
555 old_state_groups = set(
556 event_id_to_state_group[evid] for evid in old_latest_event_ids
557 )
558
559 # State groups of new_latest_event_ids
560 new_state_groups = set(
561 event_id_to_state_group[evid] for evid in new_latest_event_ids
562 )
563
564 # If the old and new groups are the same then we don't need to do
565 # anything.
566 if old_state_groups == new_state_groups:
567 return None, None
568
569 if len(new_state_groups) == 1 and len(old_state_groups) == 1:
570 # If we're going from one state group to another, let's check if
571 # we have a delta for that transition. If we do then we can just
572 # return that.
573
574 new_state_group = next(iter(new_state_groups))
575 old_state_group = next(iter(old_state_groups))
576
577 delta_ids = state_group_deltas.get((old_state_group, new_state_group), None)
578 if delta_ids is not None:
579 # We have a delta from the existing to new current state,
580 # so let's just return that. If we happen to already have
581 # the current state in memory then let's also return that,
582 # but it doesn't matter if we don't.
583 new_state = state_groups_map.get(new_state_group)
584 return new_state, delta_ids
585
586 # Now that we have calculated new_state_groups we need to get
587 # their state IDs so we can resolve to a single state set.
588 missing_state = new_state_groups - set(state_groups_map)
589 if missing_state:
590 group_to_state = yield self.state_store._get_state_for_groups(missing_state)
591 state_groups_map.update(group_to_state)
592
593 if len(new_state_groups) == 1:
594 # If there is only one state group, then we know what the current
595 # state is.
596 return state_groups_map[new_state_groups.pop()], None
597
598 # Ok, we need to defer to the state handler to resolve our state sets.
599
600 state_groups = {sg: state_groups_map[sg] for sg in new_state_groups}
601
602 events_map = {ev.event_id: ev for ev, _ in events_context}
603
604 # We need to get the room version, which is in the create event.
605 # Normally that'd be in the database, but it's also possible that we're
606 # currently trying to persist it.
607 room_version = None
608 for ev, _ in events_context:
609 if ev.type == EventTypes.Create and ev.state_key == "":
610 room_version = ev.content.get("room_version", "1")
611 break
612
613 if not room_version:
614 room_version = yield self.main_store.get_room_version(room_id)
615
616 logger.debug("calling resolve_state_groups from preserve_events")
617 res = yield self._state_resolution_handler.resolve_state_groups(
618 room_id,
619 room_version,
620 state_groups,
621 events_map,
622 state_res_store=StateResolutionStore(self.main_store),
623 )
624
625 return res.state, None
626
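The branching above amounts to a small, fixed contract on the return value. A hedged pure-Python summary, where `deltas` maps (old_group, new_group) pairs to delta dicts and `state_maps` maps groups to full state dicts (both pre-fetched; state resolution is out of scope here):

def new_state_after_sketch(old_groups, new_groups, deltas, state_maps):
    if old_groups == new_groups:
        return None, None  # no change at all
    if len(old_groups) == 1 and len(new_groups) == 1:
        old_sg, new_sg = next(iter(old_groups)), next(iter(new_groups))
        delta = deltas.get((old_sg, new_sg))
        if delta is not None:
            # the full state is optional here; the delta is enough
            return state_maps.get(new_sg), delta
    if len(new_groups) == 1:
        return state_maps[next(iter(new_groups))], None
    raise NotImplementedError("multiple new groups: state resolution needed")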
627 @defer.inlineCallbacks
628 def _calculate_state_delta(self, room_id, current_state):
629 """Calculate the new state deltas for a room.
630
631 Assumes that we are only persisting events for one room at a time.
632
633 Returns:
634 tuple[list, dict] (to_delete, to_insert): where to_delete are the
635 type/state_keys to remove from current_state_events and `to_insert`
636 are the updates to current_state_events.
637 """
638 existing_state = yield self.main_store.get_current_state_ids(room_id)
639
640 to_delete = [key for key in existing_state if key not in current_state]
641
642 to_insert = {
643 key: ev_id
644 for key, ev_id in iteritems(current_state)
645 if ev_id != existing_state.get(key)
646 }
647
648 return to_delete, to_insert
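The delta itself is a plain dict comparison, so it can be checked in isolation. A self-contained sketch over (type, state_key) -> event_id maps, with made-up event IDs:

def calculate_state_delta_sketch(existing_state, current_state):
    to_delete = [key for key in existing_state if key not in current_state]
    to_insert = {
        key: ev_id
        for key, ev_id in current_state.items()
        if ev_id != existing_state.get(key)
    }
    return to_delete, to_insert

old = {("m.room.member", "@a:x"): "$e1"}
new = {("m.room.member", "@a:x"): "$e2", ("m.room.topic", ""): "$e3"}
# the member event changed and a topic appeared; nothing was removed
assert calculate_state_delta_sketch(old, new) == ([], new)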
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import itertools
16 import logging
17
18 from twisted.internet import defer
19
20 logger = logging.getLogger(__name__)
21
22
23 class PurgeEventsStorage(object):
24 """High level interface for purging rooms and event history.
25 """
26
27 def __init__(self, hs, stores):
28 self.stores = stores
29
30 @defer.inlineCallbacks
31 def purge_room(self, room_id: str):
32 """Deletes all record of a room
33 """
34
35 state_groups_to_delete = yield self.stores.main.purge_room(room_id)
36 yield self.stores.main.purge_room_state(room_id, state_groups_to_delete)
37
38 @defer.inlineCallbacks
39 def purge_history(self, room_id, token, delete_local_events):
40 """Deletes room history before a certain point
41
42 Args:
43 room_id (str):
44
45 token (str): A topological token to delete events before
46
47 delete_local_events (bool):
48 if True, we will delete local events as well as remote ones
49 (instead of just marking them as outliers and deleting their
50 state groups).
51 """
52 state_groups = yield self.stores.main.purge_history(
53 room_id, token, delete_local_events
54 )
55
56 logger.info("[purge] finding state groups that can be deleted")
57
58 sg_to_delete = yield self._find_unreferenced_groups(state_groups)
59
60 yield self.stores.main.purge_unreferenced_state_groups(room_id, sg_to_delete)
61
62 @defer.inlineCallbacks
63 def _find_unreferenced_groups(self, state_groups):
64 """Used when purging history to figure out which state groups can be
65 deleted.
66
67 Args:
68 state_groups (set[int]): Set of state groups referenced by events
69 that are going to be deleted.
70
71 Returns:
72 Deferred[set[int]] The set of state groups that can be deleted.
73 """
74 # Graph of state group -> previous group
75 graph = {}
76
77 # Set of state groups that we have found to be referenced by events
78 referenced_groups = set()
79
80 # Set of state groups we've already seen
81 state_groups_seen = set(state_groups)
82
83 # Set of state groups to handle next.
84 next_to_search = set(state_groups)
85 while next_to_search:
86 # We bound size of groups we're looking up at once, to stop the
87 # SQL query getting too big
88 if len(next_to_search) < 100:
89 current_search = next_to_search
90 next_to_search = set()
91 else:
92 current_search = set(itertools.islice(next_to_search, 100))
93 next_to_search -= current_search
94
95 referenced = yield self.stores.main.get_referenced_state_groups(
96 current_search
97 )
98 referenced_groups |= referenced
99
100 # We don't continue iterating up the state group graphs for state
101 # groups that are referenced.
102 current_search -= referenced
103
104 edges = yield self.stores.main.get_previous_state_groups(current_search)
105
106 prevs = set(edges.values())
107 # We don't bother re-handling groups we've already seen
108 prevs -= state_groups_seen
109 next_to_search |= prevs
110 state_groups_seen |= prevs
111
112 graph.update(edges)
113
114 to_delete = state_groups_seen - referenced_groups
115
116 return to_delete
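The walk above processes the work list in bounded batches so that no single SQL query grows too large; the batching idiom itself is generic. A small sketch:

import itertools

def drain_in_batches(next_to_search, n=100):
    # yield at most n items per iteration, removing them from the set
    while next_to_search:
        if len(next_to_search) < n:
            current, next_to_search = next_to_search, set()
        else:
            current = set(itertools.islice(next_to_search, n))
            next_to_search -= current
        yield current

batches = list(drain_in_batches(set(range(250))))
assert sorted(len(b) for b in batches) == [50, 100, 100]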
1818
1919 import attr
2020
21 from twisted.internet import defer
22
2123 from synapse.api.constants import EventTypes
2224
2325 logger = logging.getLogger(__name__)
321323 )
322324
323325 return member_filter, non_member_filter
326
327
328 class StateGroupStorage(object):
329 """High level interface to fetching state for event.
330 """
331
332 def __init__(self, hs, stores):
333 self.stores = stores
334
335 def get_state_group_delta(self, state_group):
336 """Given a state group try to return a previous group and a delta between
337 the old and the new.
338
339 Returns:
340 Deferred[Tuple[Optional[int], Optional[list[dict[tuple[str, str], str]]]]]):
341 (prev_group, delta_ids)
342 """
343
344 return self.stores.main.get_state_group_delta(state_group)
345
346 @defer.inlineCallbacks
347 def get_state_groups_ids(self, _room_id, event_ids):
348 """Get the event IDs of all the state for the state groups for the given events
349
350 Args:
351 _room_id (str): id of the room for these events
352 event_ids (iterable[str]): ids of the events
353
354 Returns:
355 Deferred[dict[int, dict[tuple[str, str], str]]]:
356 dict of state_group_id -> (dict of (type, state_key) -> event id)
357 """
358 if not event_ids:
359 return {}
360
361 event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
362
363 groups = set(itervalues(event_to_groups))
364 group_to_state = yield self.stores.main._get_state_for_groups(groups)
365
366 return group_to_state
367
368 @defer.inlineCallbacks
369 def get_state_ids_for_group(self, state_group):
370 """Get the event IDs of all the state in the given state group
371
372 Args:
373 state_group (int)
374
375 Returns:
376 Deferred[dict]: Resolves to a map of (type, state_key) -> event_id
377 """
378 group_to_state = yield self._get_state_for_groups((state_group,))
379
380 return group_to_state[state_group]
381
382 @defer.inlineCallbacks
383 def get_state_groups(self, room_id, event_ids):
384 """ Get the state groups for the given list of event_ids
385 Returns:
386 Deferred[dict[int, list[EventBase]]]:
387 dict of state_group_id -> list of state events.
388 """
389 if not event_ids:
390 return {}
391
392 group_to_ids = yield self.get_state_groups_ids(room_id, event_ids)
393
394 state_event_map = yield self.stores.main.get_events(
395 [
396 ev_id
397 for group_ids in itervalues(group_to_ids)
398 for ev_id in itervalues(group_ids)
399 ],
400 get_prev_content=False,
401 )
402
403 return {
404 group: [
405 state_event_map[v]
406 for v in itervalues(event_id_map)
407 if v in state_event_map
408 ]
409 for group, event_id_map in iteritems(group_to_ids)
410 }
411
412 def _get_state_groups_from_groups(self, groups, state_filter):
413 """Returns the state groups for a given set of groups, filtering on
414 types of state events.
415
416 Args:
417 groups(list[int]): list of state group IDs to query
418 state_filter (StateFilter): The state filter used to fetch state
419 from the database.
420 Returns:
421 Deferred[dict[int, dict[tuple[str, str], str]]]:
422 dict of state_group_id -> (dict of (type, state_key) -> event id)
423 """
424
425 return self.stores.main._get_state_groups_from_groups(groups, state_filter)
426
427 @defer.inlineCallbacks
428 def get_state_for_events(self, event_ids, state_filter=StateFilter.all()):
429 """Given a list of event_ids and type tuples, return a list of state
430 dicts for each event.
431 Args:
432 event_ids (list[string])
433 state_filter (StateFilter): The state filter used to fetch state
434 from the database.
435 Returns:
436 deferred: A dict of (event_id) -> (type, state_key) -> state_event
437 """
438 event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
439
440 groups = set(itervalues(event_to_groups))
441 group_to_state = yield self.stores.main._get_state_for_groups(
442 groups, state_filter
443 )
444
445 state_event_map = yield self.stores.main.get_events(
446 [ev_id for sd in itervalues(group_to_state) for ev_id in itervalues(sd)],
447 get_prev_content=False,
448 )
449
450 event_to_state = {
451 event_id: {
452 k: state_event_map[v]
453 for k, v in iteritems(group_to_state[group])
454 if v in state_event_map
455 }
456 for event_id, group in iteritems(event_to_groups)
457 }
458
459 return {event: event_to_state[event] for event in event_ids}
460
461 @defer.inlineCallbacks
462 def get_state_ids_for_events(self, event_ids, state_filter=StateFilter.all()):
463 """
464 Get the state dicts corresponding to a list of events, containing the event_ids
465 of the state events (as opposed to the events themselves)
466
467 Args:
468 event_ids(list(str)): events whose state should be returned
469 state_filter (StateFilter): The state filter used to fetch state
470 from the database.
471
472 Returns:
473 A deferred dict from event_id -> (type, state_key) -> event_id
474 """
475 event_to_groups = yield self.stores.main._get_state_group_for_events(event_ids)
476
477 groups = set(itervalues(event_to_groups))
478 group_to_state = yield self.stores.main._get_state_for_groups(
479 groups, state_filter
480 )
481
482 event_to_state = {
483 event_id: group_to_state[group]
484 for event_id, group in iteritems(event_to_groups)
485 }
486
487 return {event: event_to_state[event] for event in event_ids}
488
489 @defer.inlineCallbacks
490 def get_state_for_event(self, event_id, state_filter=StateFilter.all()):
491 """
492 Get the state dict corresponding to a particular event
493
494 Args:
495 event_id(str): event whose state should be returned
496 state_filter (StateFilter): The state filter used to fetch state
497 from the database.
498
499 Returns:
500 A deferred dict from (type, state_key) -> state_event
501 """
502 state_map = yield self.get_state_for_events([event_id], state_filter)
503 return state_map[event_id]
504
505 @defer.inlineCallbacks
506 def get_state_ids_for_event(self, event_id, state_filter=StateFilter.all()):
507 """
508 Get the state dict corresponding to a particular event
509
510 Args:
511 event_id(str): event whose state should be returned
512 state_filter (StateFilter): The state filter used to fetch state
513 from the database.
514
515 Returns:
516 A deferred dict from (type, state_key) -> event_id
517 """
518 state_map = yield self.get_state_ids_for_events([event_id], state_filter)
519 return state_map[event_id]
520
521 def _get_state_for_groups(self, groups, state_filter=StateFilter.all()):
522 """Gets the state at each of a list of state groups, optionally
523 filtering by type/state_key
524
525 Args:
526 groups (iterable[int]): list of state groups for which we want
527 to get the state.
528 state_filter (StateFilter): The state filter used to fetch state
529 from the database.
530 Returns:
531 Deferred[dict[int, dict[tuple[str, str], str]]]:
532 dict of state_group_id -> (dict of (type, state_key) -> event id)
533 """
534 return self.stores.main._get_state_for_groups(groups, state_filter)
535
536 def store_state_group(
537 self, event_id, room_id, prev_group, delta_ids, current_state_ids
538 ):
539 """Store a new set of state, returning a newly assigned state group.
540
541 Args:
542 event_id (str): The event ID for which the state was calculated
543 room_id (str)
544 prev_group (int|None): A previous state group for the room, optional.
545 delta_ids (dict|None): The delta between state at `prev_group` and
546 `current_state_ids`, if `prev_group` was given. Same format as
547 `current_state_ids`.
548 current_state_ids (dict): The state to store. Map of (type, state_key)
549 to event_id.
550
551 Returns:
552 Deferred[int]: The state group ID
553 """
554 return self.stores.main.store_state_group(
555 event_id, room_id, prev_group, delta_ids, current_state_ids
556 )
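The prev_group/delta_ids contract documented above composes by a simple dict merge. A tiny hedged illustration with invented event IDs:

prev_state = {("m.room.name", ""): "$name1"}
delta_ids = {("m.room.name", ""): "$name2", ("m.room.topic", ""): "$topic1"}
# the state at the new group is the previous state with the delta on top
current_state_ids = {**prev_state, **delta_ids}
assert current_state_ids[("m.room.name", "")] == "$name2"
assert current_state_ids[("m.room.topic", "")] == "$topic1"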
4545 cur.execute("SELECT MAX(%s) FROM %s" % (column, table))
4646 else:
4747 cur.execute("SELECT MIN(%s) FROM %s" % (column, table))
48 val, = cur.fetchone()
48 (val,) = cur.fetchone()
4949 cur.close()
5050 current_id = int(val) if val else step
5151 return (max if step > 0 else min)(current_id, step)
8585
8686 deferred.addCallbacks(callback, errback)
8787
88 def observe(self):
88 def observe(self) -> defer.Deferred:
8989 """Observe the underlying deferred.
9090
91 Can return either a deferred if the underlying deferred is still pending
92 (or has failed), or the actual value. Callers may need to use maybeDeferred.
91 This returns a brand new deferred that is resolved when the underlying
92 deferred is resolved. Interacting with the returned deferred does not
93 affect the underlying deferred.
9394 """
9495 if not self._result:
9596 d = defer.Deferred()
104105 return d
105106 else:
106107 success, res = self._result
107 return res if success else defer.fail(res)
108 return defer.succeed(res) if success else defer.fail(res)
108109
109110 def observers(self):
110111 return self._observers
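Handing every observer its own deferred means callers can add callbacks (or errbacks) without interfering with each other. A toy fan-out along the same lines, not the real ObservableDeferred:

from twisted.internet import defer

underlying = defer.Deferred()
observers = [defer.Deferred(), defer.Deferred()]

def _fan_out(value):
    # pass the result to every observer on its own deferred
    for d in observers:
        d.callback(value)
    return value

underlying.addCallback(_fan_out)

seen = []
for d in observers:
    d.addCallback(seen.append)

underlying.callback(42)
assert seen == [42, 42]  # each observer saw the result independently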
137138 the number of concurrent executions.
138139
139140 Args:
140 func (func): Function to execute, should return a deferred.
141 func (func): Function to execute, should return a deferred or coroutine.
141142 args (list): List of arguments to pass to func, each invocation of func
142143 gets a single argument.
143144 limit (int): Maximum number of concurrent executions.
147148 """
148149 it = iter(args)
149150
150 @defer.inlineCallbacks
151 def _concurrently_execute_inner():
151 async def _concurrently_execute_inner():
152152 try:
153153 while True:
154 yield func(next(it))
154 await maybe_awaitable(func(next(it)))
155155 except StopIteration:
156156 pass
157157
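With maybe_awaitable in the loop, `func` may return either a Deferred or a coroutine. A hedged sketch of the normalisation this implies (`to_deferred` is illustrative, not Synapse's helper):

import inspect

from twisted.internet import defer

def to_deferred(value):
    # coroutines get wrapped, Deferreds pass through, plain values succeed
    if inspect.iscoroutine(value):
        return defer.ensureDeferred(value)
    if isinstance(value, defer.Deferred):
        return value
    return defer.succeed(value)

async def add_one(x):
    return x + 1

results = []
to_deferred(add_one(1)).addCallback(results.append)
assert results == [2]  # no awaits inside, so it resolves synchronously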
308308 )
309309
310310 else:
311 logger.warn(
311 logger.warning(
312312 "Unexpected exception waiting for linearizer lock %r for key %r",
313313 self.name,
314314 key,
106106 if collect_callback:
107107 collect_callback()
108108 except Exception as e:
109 logger.warn("Error calculating metrics for %s: %s", cache_name, e)
109 logger.warning("Error calculating metrics for %s: %s", cache_name, e)
110110 raise
111111
112112 yield GaugeMetricFamily("__unused", "")
1616 import inspect
1717 import logging
1818 import threading
19 from collections import namedtuple
20 from typing import Any, cast
19 from typing import Any, Tuple, Union, cast
20 from weakref import WeakValueDictionary
2121
2222 from six import itervalues
2323
3636 from . import register_cache
3737
3838 logger = logging.getLogger(__name__)
39
40 CacheKey = Union[Tuple, Any]
3941
4042
4143 class _CachedFunction(Protocol):
429431 # Add our own `cache_context` to argument list if the wrapped function
430432 # has asked for one
431433 if self.add_cache_context:
432 kwargs["cache_context"] = _CacheContext(cache, cache_key)
434 kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key)
433435
434436 try:
435437 cached_result_d = cache.get(cache_key, callback=invalidate_callback)
437439 if isinstance(cached_result_d, ObservableDeferred):
438440 observer = cached_result_d.observe()
439441 else:
440 observer = cached_result_d
442 observer = defer.succeed(cached_result_d)
441443
442444 except KeyError:
443445 ret = defer.maybeDeferred(
481483 Given a list of keys it looks in the cache to find any hits, then passes
482484 the list of missing keys to the wrapped function.
483485
484 Once wrapped, the function returns either a Deferred which resolves to
485 the list of results, or (if all results were cached), just the list of
486 results.
486 Once wrapped, the function returns a Deferred which resolves to the list
487 of results.
487488 """
488489
489490 def __init__(
617618 )
618619 return make_deferred_yieldable(d)
619620 else:
620 return results
621 return defer.succeed(results)
621622
622623 obj.__dict__[self.orig.__name__] = wrapped
623624
624625 return wrapped
625626
626627
627 class _CacheContext(namedtuple("_CacheContext", ("cache", "key"))):
628 # We rely on _CacheContext implementing __eq__ and __hash__ sensibly,
629 # which namedtuple does for us (i.e. two _CacheContext are the same if
630 # their caches and keys match). This is important in particular to
631 # dedupe when we add callbacks to lru cache nodes, otherwise the number
632 # of callbacks would grow.
633 def invalidate(self):
634 self.cache.invalidate(self.key)
628 class _CacheContext:
629 """Holds cache information from the cached function higher in the calling order.
630
631 Can be used to invalidate the higher level cache entry if something changes
632 on a lower level.
633 """
634
635 _cache_context_objects = (
636 WeakValueDictionary()
637 ) # type: WeakValueDictionary[Tuple[Cache, CacheKey], _CacheContext]
638
639 def __init__(self, cache, cache_key): # type: (Cache, CacheKey) -> None
640 self._cache = cache
641 self._cache_key = cache_key
642
643 def invalidate(self): # type: () -> None
644 """Invalidates the cache entry referred to by the context."""
645 self._cache.invalidate(self._cache_key)
646
647 @classmethod
648 def get_instance(cls, cache, cache_key): # type: (Cache, CacheKey) -> _CacheContext
649 """Returns an instance constructed with the given arguments.
650
651 A new instance is only created if none already exists.
652 """
653
654 # We make sure there are no identical _CacheContext instances. This is
655 # important in particular to dedupe when we add callbacks to lru cache
656 # nodes, otherwise the number of callbacks would grow.
657 return cls._cache_context_objects.setdefault(
658 (cache, cache_key), cls(cache, cache_key)
659 )
635660
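The WeakValueDictionary keyed on (cache, cache key) gives the same dedupe guarantee the namedtuple's value-equality used to provide, without keeping contexts alive forever. A standalone sketch of the interning pattern with a plain key:

from weakref import WeakValueDictionary

class Interned:
    _instances = WeakValueDictionary()

    def __init__(self, key):
        self.key = key

    @classmethod
    def get_instance(cls, key):
        # reuse a live instance for `key`, else register a fresh one
        return cls._instances.setdefault(key, cls(key))

a = Interned.get_instance(("cache", "key"))
b = Interned.get_instance(("cache", "key"))
assert a is b  # deduped for as long as a strong reference survives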
636661
637662 def cached(
1919
2020
2121 def create_resource_tree(desired_tree, root_resource):
22 """Create the resource tree for this Home Server.
22 """Create the resource tree for this homeserver.
2323
2424 This is unduly complicated because Twisted does not support putting
2525 child resources more than 1 level deep at a time.
118118 context = LoggingContext.current_context()
119119
120120 if context != self.start_context:
121 logger.warn(
121 logger.warning(
122122 "Context has unexpectedly changed from '%s' to '%s'. (%r)",
123123 self.start_context,
124124 context,
127127 return
128128
129129 if not context:
130 logger.warn("Expected context. (%r)", self.name)
130 logger.warning("Expected context. (%r)", self.name)
131131 return
132132
133133 current = context.get_resource_usage()
139139 block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
140140 block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
141141 except ValueError:
142 logger.warn(
142 logger.warning(
143143 "Failed to save metrics! OLD: %r, NEW: %r", self.start_usage, current
144144 )
145145
3232 resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
3333 )
3434 except (ValueError, resource.error) as e:
35 logger.warn("Failed to set file or core limit: %s", e)
35 logger.warning("Failed to set file or core limit: %s", e)
4141 try:
4242 null = open(os.devnull, "w")
4343 cwd = os.path.dirname(os.path.abspath(module.__file__))
44
4445 try:
4546 git_branch = (
4647 subprocess.check_output(
5051 .decode("ascii")
5152 )
5253 git_branch = "b=" + git_branch
53 except subprocess.CalledProcessError:
54 except (subprocess.CalledProcessError, FileNotFoundError):
55 # FileNotFoundError can arise when git is not installed
5456 git_branch = ""
5557
5658 try:
6264 .decode("ascii")
6365 )
6466 git_tag = "t=" + git_tag
65 except subprocess.CalledProcessError:
67 except (subprocess.CalledProcessError, FileNotFoundError):
6668 git_tag = ""
6769
6870 try:
7375 .strip()
7476 .decode("ascii")
7577 )
76 except subprocess.CalledProcessError:
78 except (subprocess.CalledProcessError, FileNotFoundError):
7779 git_commit = ""
7880
7981 try:
8890 )
8991
9092 git_dirty = "dirty" if is_dirty else ""
91 except subprocess.CalledProcessError:
93 except (subprocess.CalledProcessError, FileNotFoundError):
9294 git_dirty = ""
9395
9496 if git_branch or git_tag or git_commit or git_dirty:
2222
2323 from synapse.api.constants import EventTypes, Membership
2424 from synapse.events.utils import prune_event
25 from synapse.storage import Storage
2526 from synapse.storage.state import StateFilter
2627 from synapse.types import get_domain_from_id
2728
4243
4344 @defer.inlineCallbacks
4445 def filter_events_for_client(
45 store, user_id, events, is_peeking=False, always_include_ids=frozenset()
46 storage: Storage, user_id, events, is_peeking=False, always_include_ids=frozenset()
4647 ):
4748 """
4849 Check which events a user is allowed to see
4950
5051 Args:
51 store (synapse.storage.DataStore): our datastore (can also be a worker
52 store)
52 storage
5353 user_id(str): user id to be checked
5454 events(list[synapse.events.EventBase]): sequence of events to be checked
5555 is_peeking(bool): should be True if:
6767 events = list(e for e in events if not e.internal_metadata.is_soft_failed())
6868
6969 types = ((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, user_id))
70 event_id_to_state = yield store.get_state_for_events(
70 event_id_to_state = yield storage.state.get_state_for_events(
7171 frozenset(e.event_id for e in events),
7272 state_filter=StateFilter.from_types(types),
7373 )
7474
75 ignore_dict_content = yield store.get_global_account_data_by_type_for_user(
75 ignore_dict_content = yield storage.main.get_global_account_data_by_type_for_user(
7676 "m.ignored_user_list", user_id
7777 )
7878
8383 else []
8484 )
8585
86 erased_senders = yield store.are_users_erased((e.sender for e in events))
86 erased_senders = yield storage.main.are_users_erased((e.sender for e in events))
8787
8888 def allowed(event):
8989 """
212212
213213 @defer.inlineCallbacks
214214 def filter_events_for_server(
215 store, server_name, events, redact=True, check_history_visibility_only=False
215 storage: Storage,
216 server_name,
217 events,
218 redact=True,
219 check_history_visibility_only=False,
216220 ):
217221 """Filter a list of events based on whether given server is allowed to
218222 see them.
219223
220224 Args:
221 store (DataStore)
225 storage
222226 server_name (str)
223227 events (iterable[FrozenEvent])
224228 redact (bool): Whether to return a redacted version of the event, or
273277 # Let's check to see if all the events have a history visibility
274278 # of "shared" or "world_readable". If that's the case then we don't
275279 # need to check membership (as we know the server is in the room).
276 event_to_state_ids = yield store.get_state_ids_for_events(
280 event_to_state_ids = yield storage.state.get_state_ids_for_events(
277281 frozenset(e.event_id for e in events),
278282 state_filter=StateFilter.from_types(
279283 types=((EventTypes.RoomHistoryVisibility, ""),)
291295 if not visibility_ids:
292296 all_open = True
293297 else:
294 event_map = yield store.get_events(visibility_ids)
298 event_map = yield storage.main.get_events(visibility_ids)
295299 all_open = all(
296300 e.content.get("history_visibility") in (None, "shared", "world_readable")
297301 for e in itervalues(event_map)
298302 )
299303
300304 if not check_history_visibility_only:
301 erased_senders = yield store.are_users_erased((e.sender for e in events))
305 erased_senders = yield storage.main.are_users_erased((e.sender for e in events))
302306 else:
303307 # We don't want to check whether users are erased, which is equivalent
304308 # to no users having been erased.
327331
328332 # first, for each event we're wanting to return, get the event_ids
329333 # of the history vis and membership state at those events.
330 event_to_state_ids = yield store.get_state_ids_for_events(
334 event_to_state_ids = yield storage.state.get_state_ids_for_events(
331335 frozenset(e.event_id for e in events),
332336 state_filter=StateFilter.from_types(
333337 types=((EventTypes.RoomHistoryVisibility, ""), (EventTypes.Member, None))
357361 return False
358362 return state_key[idx + 1 :] == server_name
359363
360 event_map = yield store.get_events(
364 event_map = yield storage.main.get_events(
361365 [
362366 e_id
363367 for e_id, key in iteritems(event_id_to_state_key)
1818
1919 from twisted.internet import defer
2020
21 from synapse.api.constants import EventContentFields
2122 from synapse.api.errors import SynapseError
2223 from synapse.api.filtering import Filter
2324 from synapse.events import FrozenEvent
9495 "types": ["m.room.message"],
9596 "not_rooms": ["!726s6s6q:example.com"],
9697 "not_senders": ["@spam:example.com"],
98 "org.matrix.labels": ["#fun"],
99 "org.matrix.not_labels": ["#work"],
97100 },
98101 "ephemeral": {
99102 "types": ["m.receipt", "m.typing"],
319322 )
320323 self.assertFalse(Filter(definition).check(event))
321324
325 def test_filter_labels(self):
326 definition = {"org.matrix.labels": ["#fun"]}
327 event = MockEvent(
328 sender="@foo:bar",
329 type="m.room.message",
330 room_id="!secretbase:unknown",
331 content={EventContentFields.LABELS: ["#fun"]},
332 )
333
334 self.assertTrue(Filter(definition).check(event))
335
336 event = MockEvent(
337 sender="@foo:bar",
338 type="m.room.message",
339 room_id="!secretbase:unknown",
340 content={EventContentFields.LABELS: ["#notfun"]},
341 )
342
343 self.assertFalse(Filter(definition).check(event))
344
345 def test_filter_not_labels(self):
346 definition = {"org.matrix.not_labels": ["#fun"]}
347 event = MockEvent(
348 sender="@foo:bar",
349 type="m.room.message",
350 room_id="!secretbase:unknown",
351 content={EventContentFields.LABELS: ["#fun"]},
352 )
353
354 self.assertFalse(Filter(definition).check(event))
355
356 event = MockEvent(
357 sender="@foo:bar",
358 type="m.room.message",
359 room_id="!secretbase:unknown",
360 content={EventContentFields.LABELS: ["#notfun"]},
361 )
362
363 self.assertTrue(Filter(definition).check(event))
364
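A guess at the semantics these tests exercise (this is not Synapse's Filter implementation): an event needs at least one label from org.matrix.labels and none from org.matrix.not_labels.

def passes_label_filter(event_labels, labels=None, not_labels=None):
    if labels is not None and not set(event_labels) & set(labels):
        return False  # none of the required labels present
    if not_labels is not None and set(event_labels) & set(not_labels):
        return False  # an excluded label is present
    return True

assert passes_label_filter(["#fun"], labels=["#fun"])
assert not passes_label_filter(["#notfun"], labels=["#fun"])
assert not passes_label_filter(["#fun"], not_labels=["#fun"])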
322365 @defer.inlineCallbacks
323366 def test_filter_presence_match(self):
324367 user_filter_json = {"presence": {"types": ["m.*"]}}
177177 kr = keyring.Keyring(self.hs)
178178
179179 key1 = signedjson.key.generate_signing_key(1)
180 r = self.hs.datastore.store_server_verify_keys(
180 r = self.hs.get_datastore().store_server_verify_keys(
181181 "server9",
182182 time.time() * 1000,
183183 [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), 1000))],
208208 )
209209
210210 key1 = signedjson.key.generate_signing_key(1)
211 r = self.hs.datastore.store_server_verify_keys(
211 r = self.hs.get_datastore().store_server_verify_keys(
212212 "server9",
213213 time.time() * 1000,
214214 [("server9", get_key_id(key1), FetchKeyResult(get_verify_key(key1), None))],
1111 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
14 import logging
15
1416 from synapse.api.constants import EventTypes
1517 from synapse.api.errors import AuthError, Codes
18 from synapse.federation.federation_base import event_from_pdu_json
19 from synapse.logging.context import LoggingContext, run_in_background
1620 from synapse.rest import admin
1721 from synapse.rest.client.v1 import login, room
1822
1923 from tests import unittest
24
25 logger = logging.getLogger(__name__)
2026
2127
2228 class FederationTestCase(unittest.HomeserverTestCase):
7884 self.assertEqual(failure.code, 403, failure)
7985 self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure)
8086 self.assertEqual(failure.msg, "You are not invited to this room.")
87
88 def test_rejected_message_event_state(self):
89 """
90 Check that we store the state group correctly for rejected non-state events.
91
92 Regression test for #6289.
93 """
94 OTHER_SERVER = "otherserver"
95 OTHER_USER = "@otheruser:" + OTHER_SERVER
96
97 # create the room
98 user_id = self.register_user("kermit", "test")
99 tok = self.login("kermit", "test")
100 room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
101
102 # pretend that another server has joined
103 join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
104
105 # check the state group
106 sg = self.successResultOf(
107 self.store._get_state_group_for_event(join_event.event_id)
108 )
109
110 # build and send an event which will be rejected
111 ev = event_from_pdu_json(
112 {
113 "type": EventTypes.Message,
114 "content": {},
115 "room_id": room_id,
116 "sender": "@yetanotheruser:" + OTHER_SERVER,
117 "depth": join_event["depth"] + 1,
118 "prev_events": [join_event.event_id],
119 "auth_events": [],
120 "origin_server_ts": self.clock.time_msec(),
121 },
122 join_event.format_version,
123 )
124
125 with LoggingContext(request="send_rejected"):
126 d = run_in_background(self.handler.on_receive_pdu, OTHER_SERVER, ev)
127 self.get_success(d)
128
129 # that should have been rejected
130 e = self.get_success(self.store.get_event(ev.event_id, allow_rejected=True))
131 self.assertIsNotNone(e.rejected_reason)
132
133 # ... and the state group should be the same as before
134 sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id))
135
136 self.assertEqual(sg, sg2)
137
138 def test_rejected_state_event_state(self):
139 """
140 Check that we store the state group correctly for rejected state events.
141
142 Regression test for #6289.
143 """
144 OTHER_SERVER = "otherserver"
145 OTHER_USER = "@otheruser:" + OTHER_SERVER
146
147 # create the room
148 user_id = self.register_user("kermit", "test")
149 tok = self.login("kermit", "test")
150 room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
151
152 # pretend that another server has joined
153 join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)
154
155 # check the state group
156 sg = self.successResultOf(
157 self.store._get_state_group_for_event(join_event.event_id)
158 )
159
160 # build and send an event which will be rejected
161 ev = event_from_pdu_json(
162 {
163 "type": "org.matrix.test",
164 "state_key": "test_key",
165 "content": {},
166 "room_id": room_id,
167 "sender": "@yetanotheruser:" + OTHER_SERVER,
168 "depth": join_event["depth"] + 1,
169 "prev_events": [join_event.event_id],
170 "auth_events": [],
171 "origin_server_ts": self.clock.time_msec(),
172 },
173 join_event.format_version,
174 )
175
176 with LoggingContext(request="send_rejected"):
177 d = run_in_background(self.handler.on_receive_pdu, OTHER_SERVER, ev)
178 self.get_success(d)
179
180 # that should have been rejected
181 e = self.get_success(self.store.get_event(ev.event_id, allow_rejected=True))
182 self.assertIsNotNone(e.rejected_reason)
183
184 # ... and the state group should be the same as before
185 sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id))
186
187 self.assertEqual(sg, sg2)
188
189 def _build_and_send_join_event(self, other_server, other_user, room_id):
190 join_event = self.get_success(
191 self.handler.on_make_join_request(other_server, room_id, other_user)
192 )
193 # the auth code requires that a signature exists, but doesn't check that
194 # signature... go figure.
195 join_event.signatures[other_server] = {"x": "y"}
196 with LoggingContext(request="send_join"):
197 d = run_in_background(
198 self.handler.on_send_join_request, other_server, join_event
199 )
200 self.get_success(d)
201
202 # sanity-check: the room should show that the new user is a member
203 r = self.get_success(self.store.get_current_state_ids(room_id))
204 self.assertEqual(r[(EventTypes.Member, other_user)], join_event.event_id)
205
206 return join_event
7272 "get_received_txn_response",
7373 "set_received_txn_response",
7474 "get_destination_retry_timings",
75 "get_devices_by_remote",
75 "get_device_updates_by_remote",
7676 # Bits that user_directory needs
7777 "get_user_directory_stream_pos",
7878 "get_current_state_deltas",
108108 retry_timings_res
109109 )
110110
111 self.datastore.get_devices_by_remote.return_value = (0, [])
111 self.datastore.get_device_updates_by_remote.return_value = (0, [])
112112
113113 def get_received_txn_response(*args):
114114 return defer.succeed(None)
143143 self.datastore.get_to_device_stream_token = lambda: 0
144144 self.datastore.get_new_device_msgs_for_remote = lambda *args, **kargs: ([], 0)
145145 self.datastore.delete_device_msgs_for_remote = lambda *args, **kargs: None
146 self.datastore.set_received_txn_response = lambda *args, **kwargs: defer.succeed(
147 None
148 )
146149
147150 def test_started_typing_local(self):
148151 self.room_members = [U_APPLE, U_BANANA]
1919 from OpenSSL import SSL
2020 from OpenSSL.SSL import Connection
2121 from twisted.internet.interfaces import IOpenSSLServerConnectionCreator
22 from twisted.internet.ssl import Certificate, trustRootFromCertificates
23 from twisted.web.client import BrowserLikePolicyForHTTPS # noqa: F401
24 from twisted.web.iweb import IPolicyForHTTPS # noqa: F401
25
26
27 def get_test_https_policy():
28 """Get a test IPolicyForHTTPS which trusts the test CA cert
29
30 Returns:
31 IPolicyForHTTPS
32 """
33 ca_file = get_test_ca_cert_file()
34 with open(ca_file) as stream:
35 content = stream.read()
36 cert = Certificate.loadPEM(content)
37 trust_root = trustRootFromCertificates([cert])
38 return BrowserLikePolicyForHTTPS(trustRoot=trust_root)
2239
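One hypothetical use of this helper: hand the policy to a stock twisted Agent so that HTTPS requests made in tests validate against the test CA.

from twisted.internet import reactor
from twisted.web.client import Agent

agent = Agent(reactor, contextFactory=get_test_https_policy())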
2340
2441 def get_test_ca_cert_file():
123123 FakeTransport(client_protocol, self.reactor, server_tls_protocol)
124124 )
125125
126 # grab a hold of the TLS connection, in case it gets torn down
127 server_tls_connection = server_tls_protocol._tlsConnection
128
129 # fish the test server back out of the server-side TLS protocol.
130 http_protocol = server_tls_protocol.wrappedProtocol
131
126132 # give the reactor a pump to get the TLS juices flowing.
127133 self.reactor.pump((0.1,))
128134
129135 # check the SNI
130 server_name = server_tls_protocol._tlsConnection.get_servername()
136 server_name = server_tls_connection.get_servername()
131137 self.assertEqual(
132138 server_name,
133139 expected_sni,
134140 "Expected SNI %s but got %s" % (expected_sni, server_name),
135141 )
136142
137 # fish the test server back out of the server-side TLS protocol.
138 return server_tls_protocol.wrappedProtocol
143 return http_protocol
139144
140145 @defer.inlineCallbacks
141146 def _make_get_request(self, uri):
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15
16 import treq
17
18 from twisted.internet import interfaces # noqa: F401
19 from twisted.internet.protocol import Factory
20 from twisted.protocols.tls import TLSMemoryBIOFactory
21 from twisted.web.http import HTTPChannel
22
23 from synapse.http.proxyagent import ProxyAgent
24
25 from tests.http import TestServerTLSConnectionFactory, get_test_https_policy
26 from tests.server import FakeTransport, ThreadedMemoryReactorClock
27 from tests.unittest import TestCase
28
29 logger = logging.getLogger(__name__)
30
31 HTTPFactory = Factory.forProtocol(HTTPChannel)
32
33
34 class MatrixFederationAgentTests(TestCase):
35 def setUp(self):
36 self.reactor = ThreadedMemoryReactorClock()
37
38 def _make_connection(
39 self, client_factory, server_factory, ssl=False, expected_sni=None
40 ):
41 """Builds a test server, and completes the outgoing client connection
42
43 Args:
44 client_factory (interfaces.IProtocolFactory): the factory that the
45 application is trying to use to make the outbound connection. We will
46 invoke it to build the client Protocol
47
48 server_factory (interfaces.IProtocolFactory): a factory to build the
49 server-side protocol
50
51 ssl (bool): If true, we will expect an ssl connection and wrap
52 server_factory with a TLSMemoryBIOFactory
53
54 expected_sni (bytes|None): the expected SNI value
55
56 Returns:
57 IProtocol: the server Protocol returned by server_factory
58 """
59 if ssl:
60 server_factory = _wrap_server_factory_for_tls(server_factory)
61
62 server_protocol = server_factory.buildProtocol(None)
63
64 # now, tell the client protocol factory to build the client protocol,
65 # and wire the output of said protocol up to the server via
66 # a FakeTransport.
67 #
68 # Normally this would be done by the TCP socket code in Twisted, but we are
69 # stubbing that out here.
70 client_protocol = client_factory.buildProtocol(None)
71 client_protocol.makeConnection(
72 FakeTransport(server_protocol, self.reactor, client_protocol)
73 )
74
75 # tell the server protocol to send its stuff back to the client, too
76 server_protocol.makeConnection(
77 FakeTransport(client_protocol, self.reactor, server_protocol)
78 )
79
80 if ssl:
81 http_protocol = server_protocol.wrappedProtocol
82 tls_connection = server_protocol._tlsConnection
83 else:
84 http_protocol = server_protocol
85 tls_connection = None
86
87 # give the reactor a pump to get the TLS juices flowing (if needed)
88 self.reactor.advance(0)
89
90 if expected_sni is not None:
91 server_name = tls_connection.get_servername()
92 self.assertEqual(
93 server_name,
94 expected_sni,
95 "Expected SNI %s but got %s" % (expected_sni, server_name),
96 )
97
98 return http_protocol
99
100 def test_http_request(self):
101 agent = ProxyAgent(self.reactor)
102
103 self.reactor.lookups["test.com"] = "1.2.3.4"
104 d = agent.request(b"GET", b"http://test.com")
105
106 # there should be a pending TCP connection
107 clients = self.reactor.tcpClients
108 self.assertEqual(len(clients), 1)
109 (host, port, client_factory, _timeout, _bindAddress) = clients[0]
110 self.assertEqual(host, "1.2.3.4")
111 self.assertEqual(port, 80)
112
113 # make a test server, and wire up the client
114 http_server = self._make_connection(
115 client_factory, _get_test_protocol_factory()
116 )
117
118 # the FakeTransport is async, so we need to pump the reactor
119 self.reactor.advance(0)
120
121 # now there should be a pending request
122 self.assertEqual(len(http_server.requests), 1)
123
124 request = http_server.requests[0]
125 self.assertEqual(request.method, b"GET")
126 self.assertEqual(request.path, b"/")
127 self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
128 request.write(b"result")
129 request.finish()
130
131 self.reactor.advance(0)
132
133 resp = self.successResultOf(d)
134 body = self.successResultOf(treq.content(resp))
135 self.assertEqual(body, b"result")
136
137 def test_https_request(self):
138 agent = ProxyAgent(self.reactor, contextFactory=get_test_https_policy())
139
140 self.reactor.lookups["test.com"] = "1.2.3.4"
141 d = agent.request(b"GET", b"https://test.com/abc")
142
143 # there should be a pending TCP connection
144 clients = self.reactor.tcpClients
145 self.assertEqual(len(clients), 1)
146 (host, port, client_factory, _timeout, _bindAddress) = clients[0]
147 self.assertEqual(host, "1.2.3.4")
148 self.assertEqual(port, 443)
149
150 # make a test server, and wire up the client
151 http_server = self._make_connection(
152 client_factory,
153 _get_test_protocol_factory(),
154 ssl=True,
155 expected_sni=b"test.com",
156 )
157
158 # the FakeTransport is async, so we need to pump the reactor
159 self.reactor.advance(0)
160
161 # now there should be a pending request
162 self.assertEqual(len(http_server.requests), 1)
163
164 request = http_server.requests[0]
165 self.assertEqual(request.method, b"GET")
166 self.assertEqual(request.path, b"/abc")
167 self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
168 request.write(b"result")
169 request.finish()
170
171 self.reactor.advance(0)
172
173 resp = self.successResultOf(d)
174 body = self.successResultOf(treq.content(resp))
175 self.assertEqual(body, b"result")
176
177 def test_http_request_via_proxy(self):
178 agent = ProxyAgent(self.reactor, http_proxy=b"proxy.com:8888")
179
180 self.reactor.lookups["proxy.com"] = "1.2.3.5"
181 d = agent.request(b"GET", b"http://test.com")
182
183 # there should be a pending TCP connection
184 clients = self.reactor.tcpClients
185 self.assertEqual(len(clients), 1)
186 (host, port, client_factory, _timeout, _bindAddress) = clients[0]
187 self.assertEqual(host, "1.2.3.5")
188 self.assertEqual(port, 8888)
189
190 # make a test server, and wire up the client
191 http_server = self._make_connection(
192 client_factory, _get_test_protocol_factory()
193 )
194
195 # the FakeTransport is async, so we need to pump the reactor
196 self.reactor.advance(0)
197
198 # now there should be a pending request
199 self.assertEqual(len(http_server.requests), 1)
200
201 request = http_server.requests[0]
202 self.assertEqual(request.method, b"GET")
203 self.assertEqual(request.path, b"http://test.com")
204 self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
205 request.write(b"result")
206 request.finish()
207
208 self.reactor.advance(0)
209
210 resp = self.successResultOf(d)
211 body = self.successResultOf(treq.content(resp))
212 self.assertEqual(body, b"result")
213
214 def test_https_request_via_proxy(self):
215 agent = ProxyAgent(
216 self.reactor,
217 contextFactory=get_test_https_policy(),
218 https_proxy=b"proxy.com",
219 )
220
221 self.reactor.lookups["proxy.com"] = "1.2.3.5"
222 d = agent.request(b"GET", b"https://test.com/abc")
223
224 # there should be a pending TCP connection
225 clients = self.reactor.tcpClients
226 self.assertEqual(len(clients), 1)
227 (host, port, client_factory, _timeout, _bindAddress) = clients[0]
228 self.assertEqual(host, "1.2.3.5")
229 self.assertEqual(port, 1080)
230
231 # make a test HTTP server, and wire up the client
232 proxy_server = self._make_connection(
233 client_factory, _get_test_protocol_factory()
234 )
235
236 # fish the transports back out so that we can do the old switcheroo
237 s2c_transport = proxy_server.transport
238 client_protocol = s2c_transport.other
239 c2s_transport = client_protocol.transport
240
241 # the FakeTransport is async, so we need to pump the reactor
242 self.reactor.advance(0)
243
244 # now there should be a pending CONNECT request
245 self.assertEqual(len(proxy_server.requests), 1)
246
247 request = proxy_server.requests[0]
248 self.assertEqual(request.method, b"CONNECT")
249 self.assertEqual(request.path, b"test.com:443")
250
251 # tell the proxy server not to close the connection
252 proxy_server.persistent = True
253
254 # this just stops the http Request trying to do a chunked response
255 # request.setHeader(b"Content-Length", b"0")
256 request.finish()
257
258 # now we can replace the proxy channel with a new, SSL-wrapped HTTP channel
259 ssl_factory = _wrap_server_factory_for_tls(_get_test_protocol_factory())
260 ssl_protocol = ssl_factory.buildProtocol(None)
261 http_server = ssl_protocol.wrappedProtocol
262
263 ssl_protocol.makeConnection(
264 FakeTransport(client_protocol, self.reactor, ssl_protocol)
265 )
266 c2s_transport.other = ssl_protocol
267
268 self.reactor.advance(0)
269
270 server_name = ssl_protocol._tlsConnection.get_servername()
271 expected_sni = b"test.com"
272 self.assertEqual(
273 server_name,
274 expected_sni,
275 "Expected SNI %s but got %s" % (expected_sni, server_name),
276 )
277
278 # now there should be a pending request
279 self.assertEqual(len(http_server.requests), 1)
280
281 request = http_server.requests[0]
282 self.assertEqual(request.method, b"GET")
283 self.assertEqual(request.path, b"/abc")
284 self.assertEqual(request.requestHeaders.getRawHeaders(b"host"), [b"test.com"])
285 request.write(b"result")
286 request.finish()
287
288 self.reactor.advance(0)
289
290 resp = self.successResultOf(d)
291 body = self.successResultOf(treq.content(resp))
292 self.assertEqual(body, b"result")
293
294
295 def _wrap_server_factory_for_tls(factory, sanlist=None):
296 """Wrap an existing Protocol Factory with a test TLSMemoryBIOFactory
297
298 The resultant factory will create a TLS server which presents a certificate
299 signed by our test CA, valid for the domains in `sanlist`
300
301 Args:
302 factory (interfaces.IProtocolFactory): protocol factory to wrap
303 sanlist (iterable[bytes]): list of domains the cert should be valid for
304
305 Returns:
306 interfaces.IProtocolFactory
307 """
308 if sanlist is None:
309 sanlist = [b"DNS:test.com"]
310
311 connection_creator = TestServerTLSConnectionFactory(sanlist=sanlist)
312 return TLSMemoryBIOFactory(
313 connection_creator, isClient=False, wrappedFactory=factory
314 )
315
316
317 def _get_test_protocol_factory():
318 """Get a protocol Factory which will build an HTTPChannel
319
320 Returns:
321 interfaces.IProtocolFactory
322 """
323 server_factory = Factory.forProtocol(HTTPChannel)
324
325 # Request.finish expects the factory to have a 'log' method.
326 server_factory.log = _log_request
327
328 return server_factory
329
330
331 def _log_request(request):
332 """Implements Factory.log, which is expected by Request.finish"""
333 logger.info("Completed request %s", request)
4949 config = self.default_config()
5050 config["start_pushers"] = True
5151
52 hs = self.setup_test_homeserver(config=config, simple_http_client=m)
52 hs = self.setup_test_homeserver(config=config, proxied_http_client=m)
5353
5454 return hs
5555
4040 def prepare(self, reactor, clock, hs):
4141
4242 self.master_store = self.hs.get_datastore()
43 self.storage = hs.get_storage()
4344 self.slaved_store = self.STORE_TYPE(self.hs.get_db_conn(), self.hs)
4445 self.event_id = 0
4546
233233 type="m.room.member", sender=USER_ID_2, key=USER_ID_2, membership="join"
234234 )
235235 msg, msgctx = self.build_event()
236 self.get_success(self.master_store.persist_events([(j2, j2ctx), (msg, msgctx)]))
236 self.get_success(
237 self.storage.persistence.persist_events([(j2, j2ctx), (msg, msgctx)])
238 )
237239 self.replicate()
238240
239241 event_source = RoomEventSource(self.hs)
289291
290292 if backfill:
291293 self.get_success(
292 self.master_store.persist_events([(event, context)], backfilled=True)
294 self.storage.persistence.persist_events(
295 [(event, context)], backfilled=True
296 )
293297 )
294298 else:
295 self.get_success(self.master_store.persist_event(event, context))
299 self.get_success(self.storage.persistence.persist_event(event, context))
296300
297301 return event
298302
560560 self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
561561
562562 return channel.json_body["groups"]
563
564
565 class PurgeRoomTestCase(unittest.HomeserverTestCase):
566 """Test /purge_room admin API.
567 """
568
569 servlets = [
570 synapse.rest.admin.register_servlets,
571 login.register_servlets,
572 room.register_servlets,
573 ]
574
575 def prepare(self, reactor, clock, hs):
576 self.store = hs.get_datastore()
577
578 self.admin_user = self.register_user("admin", "pass", admin=True)
579 self.admin_user_tok = self.login("admin", "pass")
580
581 def test_purge_room(self):
582 room_id = self.helper.create_room_as(self.admin_user, tok=self.admin_user_tok)
583
584 # All users have to have left the room.
585 self.helper.leave(room_id, user=self.admin_user, tok=self.admin_user_tok)
586
587 url = "/_synapse/admin/v1/purge_room"
588 request, channel = self.make_request(
589 "POST",
590 url.encode("ascii"),
591 {"room_id": room_id},
592 access_token=self.admin_user_tok,
593 )
594 self.render(request)
595
596 self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
597
598 # Test that the following tables have been purged of all rows related to the room.
599 for table in (
600 "current_state_events",
601 "event_backward_extremities",
602 "event_forward_extremities",
603 "event_json",
604 "event_push_actions",
605 "event_search",
606 "events",
607 "group_rooms",
608 "public_room_list_stream",
609 "receipts_graph",
610 "receipts_linearized",
611 "room_aliases",
612 "room_depth",
613 "room_memberships",
614 "room_stats_state",
615 "room_stats_current",
616 "room_stats_historical",
617 "room_stats_earliest_token",
618 "rooms",
619 "stream_ordering_to_exterm",
620 "users_in_public_rooms",
621 "users_who_share_private_rooms",
622 "appservice_room_list",
623 "e2e_room_keys",
624 "event_push_summary",
625 "pusher_throttle",
626 "group_summary_rooms",
627 "local_invites",
628 "room_account_data",
629 "room_tags",
630 "state_groups",
631 "state_groups_state",
632 ):
633 count = self.get_success(
634 self.store._simple_select_one_onecol(
635 table=table,
636 keyvalues={"room_id": room_id},
637 retcol="COUNT(*)",
638 desc="test_purge_room",
639 )
640 )
641
642 self.assertEqual(count, 0, msg="Rows not purged in {}".format(table))
643
644 test_purge_room.skip = "Disabled because it's currently broken"
2323 from twisted.internet import defer
2424
2525 import synapse.rest.admin
26 from synapse.api.constants import Membership
26 from synapse.api.constants import EventContentFields, EventTypes, Membership
2727 from synapse.rest.client.v1 import login, profile, room
2828
2929 from tests import unittest
809809 self.assertEquals(token, channel.json_body["start"])
810810 self.assertTrue("chunk" in channel.json_body)
811811 self.assertTrue("end" in channel.json_body)
812
813 def test_filter_labels(self):
814 """Test that we can filter by a label."""
815 message_filter = json.dumps(
816 {"types": [EventTypes.Message], "org.matrix.labels": ["#fun"]}
817 )
818
819 events = self._test_filter_labels(message_filter)
820
821 self.assertEqual(len(events), 2, [event["content"] for event in events])
822 self.assertEqual(events[0]["content"]["body"], "with right label", events[0])
823 self.assertEqual(events[1]["content"]["body"], "with right label", events[1])
824
825 def test_filter_not_labels(self):
826 """Test that we can filter by the absence of a label."""
827 message_filter = json.dumps(
828 {"types": [EventTypes.Message], "org.matrix.not_labels": ["#fun"]}
829 )
830
831 events = self._test_filter_labels(message_filter)
832
833 self.assertEqual(len(events), 3, [event["content"] for event in events])
834 self.assertEqual(events[0]["content"]["body"], "without label", events[0])
835 self.assertEqual(events[1]["content"]["body"], "with wrong label", events[1])
836 self.assertEqual(
837 events[2]["content"]["body"], "with two wrong labels", events[2]
838 )
839
840 def test_filter_labels_not_labels(self):
841 """Test that we can filter by both a label and the absence of another label."""
842 sync_filter = json.dumps(
843 {
844 "types": [EventTypes.Message],
845 "org.matrix.labels": ["#work"],
846 "org.matrix.not_labels": ["#notfun"],
847 }
848 )
849
850 events = self._test_filter_labels(sync_filter)
851
852 self.assertEqual(len(events), 1, [event["content"] for event in events])
853 self.assertEqual(events[0]["content"]["body"], "with wrong label", events[0])
854
855 def _test_filter_labels(self, message_filter):
856 self.helper.send_event(
857 room_id=self.room_id,
858 type=EventTypes.Message,
859 content={
860 "msgtype": "m.text",
861 "body": "with right label",
862 EventContentFields.LABELS: ["#fun"],
863 },
864 )
865
866 self.helper.send_event(
867 room_id=self.room_id,
868 type=EventTypes.Message,
869 content={"msgtype": "m.text", "body": "without label"},
870 )
871
872 self.helper.send_event(
873 room_id=self.room_id,
874 type=EventTypes.Message,
875 content={
876 "msgtype": "m.text",
877 "body": "with wrong label",
878 EventContentFields.LABELS: ["#work"],
879 },
880 )
881
882 self.helper.send_event(
883 room_id=self.room_id,
884 type=EventTypes.Message,
885 content={
886 "msgtype": "m.text",
887 "body": "with two wrong labels",
888 EventContentFields.LABELS: ["#work", "#notfun"],
889 },
890 )
891
892 self.helper.send_event(
893 room_id=self.room_id,
894 type=EventTypes.Message,
895 content={
896 "msgtype": "m.text",
897 "body": "with right label",
898 EventContentFields.LABELS: ["#fun"],
899 },
900 )
901
902 token = "s0_0_0_0_0_0_0_0_0"
903 request, channel = self.make_request(
904 "GET",
905 "/rooms/%s/messages?access_token=x&from=%s&filter=%s"
906 % (self.room_id, token, message_filter),
907 )
908 self.render(request)
909
910 return channel.json_body["chunk"]
812911
813912
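These label tests exercise MSC2326-style fields: EventContentFields.LABELS resolves to the "org.matrix.labels" content key, and the filter keys "org.matrix.labels" / "org.matrix.not_labels" match against it. A hedged sketch of the wire shapes (values illustrative):

# An event content carrying labels, and a flat /messages filter that would
# select it; EventTypes.Message is the "m.room.message" type string.
event_content = {
    "msgtype": "m.text",
    "body": "with right label",
    "org.matrix.labels": ["#fun"],       # EventContentFields.LABELS
}

message_filter = {
    "types": ["m.room.message"],         # EventTypes.Message
    "org.matrix.labels": ["#fun"],       # include events with this label...
    "org.matrix.not_labels": ["#work"],  # ...but exclude events with this one
}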
814913 class RoomSearchTestCase(unittest.HomeserverTestCase):
105105 self.auth_user_id = temp_id
106106
107107 def send(self, room_id, body=None, txn_id=None, tok=None, expect_code=200):
108 if txn_id is None:
109 txn_id = "m%s" % (str(time.time()))
110108 if body is None:
111109 body = "body_text_here"
112110
113 path = "/_matrix/client/r0/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
114111 content = {"msgtype": "m.text", "body": body}
112
113 return self.send_event(
114 room_id, "m.room.message", content, txn_id, tok, expect_code
115 )
116
117 def send_event(
118 self, room_id, type, content={}, txn_id=None, tok=None, expect_code=200
119 ):
120 if txn_id is None:
121 txn_id = "m%s" % (str(time.time()))
122
123 path = "/_matrix/client/r0/rooms/%s/send/%s/%s" % (room_id, type, txn_id)
115124 if tok:
116125 path = path + "?access_token=%s" % tok
117126
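The refactoring makes send() a thin wrapper over the new send_event(), which can post arbitrary event types. A minimal usage sketch (room id and token are placeholders):

# Hedged usage sketch of the new helper from within a HomeserverTestCase.
self.helper.send_event(
    room_id="!room:test",
    type="m.room.message",
    content={"msgtype": "m.text", "body": "hello"},
    tok="<access_token>",
)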
1111 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
1212 # See the License for the specific language governing permissions and
1313 # limitations under the License.
14 import json
1415
1516 from mock import Mock
1617
1718 import synapse.rest.admin
19 from synapse.api.constants import EventContentFields, EventTypes
1820 from synapse.rest.client.v1 import login, room
1921 from synapse.rest.client.v2_alpha import sync
2022
2527 class FilterTestCase(unittest.HomeserverTestCase):
2628
2729 user_id = "@apple:test"
28 servlets = [sync.register_servlets]
30 servlets = [
31 synapse.rest.admin.register_servlets_for_client_rest_resource,
32 room.register_servlets,
33 login.register_servlets,
34 sync.register_servlets,
35 ]
2936
3037 def make_homeserver(self, reactor, clock):
3138
6976 )
7077
7178
79 class SyncFilterTestCase(unittest.HomeserverTestCase):
80 servlets = [
81 synapse.rest.admin.register_servlets_for_client_rest_resource,
82 room.register_servlets,
83 login.register_servlets,
84 sync.register_servlets,
85 ]
86
87 def test_sync_filter_labels(self):
88 """Test that we can filter by a label."""
89 sync_filter = json.dumps(
90 {
91 "room": {
92 "timeline": {
93 "types": [EventTypes.Message],
94 "org.matrix.labels": ["#fun"],
95 }
96 }
97 }
98 )
99
100 events = self._test_sync_filter_labels(sync_filter)
101
102 self.assertEqual(len(events), 2, [event["content"] for event in events])
103 self.assertEqual(events[0]["content"]["body"], "with right label", events[0])
104 self.assertEqual(events[1]["content"]["body"], "with right label", events[1])
105
106 def test_sync_filter_not_labels(self):
107 """Test that we can filter by the absence of a label."""
108 sync_filter = json.dumps(
109 {
110 "room": {
111 "timeline": {
112 "types": [EventTypes.Message],
113 "org.matrix.not_labels": ["#fun"],
114 }
115 }
116 }
117 )
118
119 events = self._test_sync_filter_labels(sync_filter)
120
121 self.assertEqual(len(events), 3, [event["content"] for event in events])
122 self.assertEqual(events[0]["content"]["body"], "without label", events[0])
123 self.assertEqual(events[1]["content"]["body"], "with wrong label", events[1])
124 self.assertEqual(
125 events[2]["content"]["body"], "with two wrong labels", events[2]
126 )
127
128 def test_sync_filter_labels_not_labels(self):
129 """Test that we can filter by both a label and the absence of another label."""
130 sync_filter = json.dumps(
131 {
132 "room": {
133 "timeline": {
134 "types": [EventTypes.Message],
135 "org.matrix.labels": ["#work"],
136 "org.matrix.not_labels": ["#notfun"],
137 }
138 }
139 }
140 )
141
142 events = self._test_sync_filter_labels(sync_filter)
143
144 self.assertEqual(len(events), 1, [event["content"] for event in events])
145 self.assertEqual(events[0]["content"]["body"], "with wrong label", events[0])
146
147 def _test_sync_filter_labels(self, sync_filter):
148 user_id = self.register_user("kermit", "test")
149 tok = self.login("kermit", "test")
150
151 room_id = self.helper.create_room_as(user_id, tok=tok)
152
153 self.helper.send_event(
154 room_id=room_id,
155 type=EventTypes.Message,
156 content={
157 "msgtype": "m.text",
158 "body": "with right label",
159 EventContentFields.LABELS: ["#fun"],
160 },
161 tok=tok,
162 )
163
164 self.helper.send_event(
165 room_id=room_id,
166 type=EventTypes.Message,
167 content={"msgtype": "m.text", "body": "without label"},
168 tok=tok,
169 )
170
171 self.helper.send_event(
172 room_id=room_id,
173 type=EventTypes.Message,
174 content={
175 "msgtype": "m.text",
176 "body": "with wrong label",
177 EventContentFields.LABELS: ["#work"],
178 },
179 tok=tok,
180 )
181
182 self.helper.send_event(
183 room_id=room_id,
184 type=EventTypes.Message,
185 content={
186 "msgtype": "m.text",
187 "body": "with two wrong labels",
188 EventContentFields.LABELS: ["#work", "#notfun"],
189 },
190 tok=tok,
191 )
192
193 self.helper.send_event(
194 room_id=room_id,
195 type=EventTypes.Message,
196 content={
197 "msgtype": "m.text",
198 "body": "with right label",
199 EventContentFields.LABELS: ["#fun"],
200 },
201 tok=tok,
202 )
203
204 request, channel = self.make_request(
205 "GET", "/sync?filter=%s" % sync_filter, access_token=tok
206 )
207 self.render(request)
208 self.assertEqual(channel.code, 200, channel.result)
209
210 return channel.json_body["rooms"]["join"][room_id]["timeline"]["events"]
211
212
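Note how the sync tests wrap the same label constraints under room.timeline, whereas the /messages tests pass a flat event filter. A side-by-side sketch (values illustrative):

# The same label constraint in its two filter shapes.
flat_event_filter = {"types": ["m.room.message"], "org.matrix.labels": ["#fun"]}

nested_sync_filter = {
    "room": {"timeline": flat_event_filter}  # /sync scopes event filters per section
}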
72213 class SyncTypingTests(unittest.HomeserverTestCase):
73214
74215 servlets = [
160160 path = path.encode("ascii")
161161
162162 # Decorate it to be the full path, if we're using shorthand
163 if shorthand and not path.startswith(b"/_matrix"):
163 if (
164 shorthand
165 and not path.startswith(b"/_matrix")
166 and not path.startswith(b"/_synapse")
167 ):
164168 path = b"/_matrix/client/r0/" + path
165169 path = path.replace(b"//", b"/")
166170
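The widened shorthand check keeps /_synapse admin paths intact while still expanding bare endpoint names. A standalone sketch of the rule (helper name hypothetical):

# Relative paths are expanded to the client r0 prefix; /_matrix and /_synapse
# paths pass through unchanged.
def expand(path: bytes, shorthand: bool = True) -> bytes:
    if (
        shorthand
        and not path.startswith(b"/_matrix")
        and not path.startswith(b"/_synapse")
    ):
        path = b"/_matrix/client/r0/" + path
    return path.replace(b"//", b"/")

assert expand(b"login") == b"/_matrix/client/r0/login"
assert expand(b"/_synapse/admin/v1/purge_room") == b"/_synapse/admin/v1/purge_room"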
390394 self.disconnecting = True
391395 if self._protocol:
392396 self._protocol.connectionLost(reason)
393 self.disconnected = True
397
398 # if we still have data to write, delay until that is done
399 if self.buffer:
400 logger.info(
401 "FakeTransport: Delaying disconnect until buffer is flushed"
402 )
403 else:
404 self.disconnected = True
394405
395406 def abortConnection(self):
396407 logger.info("FakeTransport: abortConnection()")
397 self.loseConnection()
408
409 if not self.disconnecting:
410 self.disconnecting = True
411 if self._protocol:
412 self._protocol.connectionLost(None)
413
414 self.disconnected = True
398415
399416 def pauseProducing(self):
400417 if not self.producer:
425442 self._reactor.callLater(0.0, _produce)
426443
427444 def write(self, byt):
445 if self.disconnecting:
446 raise Exception("Writing to disconnecting FakeTransport")
447
428448 self.buffer = self.buffer + byt
429449
430450 # always actually do the write asynchronously. Some protocols (notably the
469489 if self.buffer and self.autoflush:
470490 self._reactor.callLater(0.0, self.flush)
471491
492 if not self.buffer and self.disconnecting:
493 logger.info("FakeTransport: Buffer now empty, completing disconnect")
494 self.disconnected = True
495
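These changes make loseConnection() wait for the write buffer to drain before marking the transport disconnected, while write() rejects data once a disconnect is underway. A standalone model of that pattern (class name and API hypothetical, not the test helper itself):

class BufferedTransport:
    def __init__(self):
        self.buffer = b""
        self.disconnecting = False
        self.disconnected = False

    def write(self, data):
        if self.disconnecting:
            raise Exception("Writing to disconnecting BufferedTransport")
        self.buffer += data

    def lose_connection(self):
        self.disconnecting = True
        if not self.buffer:      # nothing pending, disconnect immediately
            self.disconnected = True

    def flush(self):
        self.buffer = b""        # pretend the bytes were delivered
        if self.disconnecting:   # buffer drained, complete the disconnect
            self.disconnected = True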
472496
473497 def connect_client(reactor: IReactorTCP, client_id: int) -> AccumulatingProtocol:
474498 """
196196
197197 a.func.prefill(("foo",), ObservableDeferred(d))
198198
199 self.assertEquals(a.func("foo"), d.result)
199 self.assertEquals(a.func("foo").result, d.result)
200200 self.assertEquals(callcount[0], 0)
201201
202202 @defer.inlineCallbacks
7171 )
7272
7373 @defer.inlineCallbacks
74 def test_get_devices_by_remote(self):
74 def test_get_device_updates_by_remote(self):
7575 device_ids = ["device_id1", "device_id2"]
7676
7777 # Add two device updates with a single stream_id
8080 )
8181
8282 # Get all device updates ever meant for this remote
83 now_stream_id, device_updates = yield self.store.get_devices_by_remote(
83 now_stream_id, device_updates = yield self.store.get_device_updates_by_remote(
8484 "somehost", -1, limit=100
8585 )
8686
8888 self._check_devices_in_updates(device_ids, device_updates)
8989
9090 @defer.inlineCallbacks
91 def test_get_devices_by_remote_limited(self):
91 def test_get_device_updates_by_remote_limited(self):
9292 # Test breaking the update limit in 1, 101, and 1 device_id segments
9393
9494 # first add one device
114114 #
115115
116116 # first we should get a single update
117 now_stream_id, device_updates = yield self.store.get_devices_by_remote(
117 now_stream_id, device_updates = yield self.store.get_device_updates_by_remote(
118118 "someotherhost", -1, limit=100
119119 )
120120 self._check_devices_in_updates(device_ids1, device_updates)
121121
122122 # Then we should get an empty list back as the 101 devices broke the limit
123 now_stream_id, device_updates = yield self.store.get_devices_by_remote(
123 now_stream_id, device_updates = yield self.store.get_device_updates_by_remote(
124124 "someotherhost", now_stream_id, limit=100
125125 )
126126 self.assertEqual(len(device_updates), 0)
127127
128128 # The 101 devices should've been cleared, so we should now just get one device
129129 # update
130 now_stream_id, device_updates = yield self.store.get_devices_by_remote(
130 now_stream_id, device_updates = yield self.store.get_device_updates_by_remote(
131131 "someotherhost", now_stream_id, limit=100
132132 )
133133 self._check_devices_in_updates(device_ids3, device_updates)
136136 """Check that an specific device ids exist in a list of device update EDUs"""
137137 self.assertEqual(len(device_updates), len(expected_device_ids))
138138
139 received_device_ids = {update["device_id"] for update in device_updates}
139 received_device_ids = {
140 update["device_id"] for edu_type, update in device_updates
141 }
140142 self.assertEqual(received_device_ids, set(expected_device_ids))
141143
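The rename also changes the return shape: each entry is now an (edu_type, content) tuple rather than a bare content dict, which is why _check_devices_in_updates unpacks two values. A hedged consumption sketch (function name illustrative):

from twisted.internet import defer

@defer.inlineCallbacks
def _collect_device_ids(store, destination):
    # Each update is an (edu_type, update_content) tuple under the new API.
    now_stream_id, device_updates = yield store.get_device_updates_by_remote(
        destination, -1, limit=100
    )
    return {update["device_id"] for edu_type, update in device_updates}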
142144 @defer.inlineCallbacks
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 The Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from tests import unittest
16
17 # sample room_key data for use in the tests
18 room_key = {
19 "first_message_index": 1,
20 "forwarded_count": 1,
21 "is_verified": False,
22 "session_data": "SSBBTSBBIEZJU0gK",
23 }
24
25
26 class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase):
27 def make_homeserver(self, reactor, clock):
28 hs = self.setup_test_homeserver("server", http_client=None)
29 self.store = hs.get_datastore()
30 return hs
31
32 def test_room_keys_version_delete(self):
33 # test that deleting a room key backup deletes the keys
34 version1 = self.get_success(
35 self.store.create_e2e_room_keys_version(
36 "user_id", {"algorithm": "rot13", "auth_data": {}}
37 )
38 )
39
40 self.get_success(
41 self.store.set_e2e_room_key(
42 "user_id", version1, "room", "session", room_key
43 )
44 )
45
46 version2 = self.get_success(
47 self.store.create_e2e_room_keys_version(
48 "user_id", {"algorithm": "rot13", "auth_data": {}}
49 )
50 )
51
52 self.get_success(
53 self.store.set_e2e_room_key(
54 "user_id", version2, "room", "session", room_key
55 )
56 )
57
58 # make sure the keys were stored properly
59 keys = self.get_success(self.store.get_e2e_room_keys("user_id", version1))
60 self.assertEqual(len(keys["rooms"]), 1)
61
62 keys = self.get_success(self.store.get_e2e_room_keys("user_id", version2))
63 self.assertEqual(len(keys["rooms"]), 1)
64
65 # delete version1
66 self.get_success(self.store.delete_e2e_room_keys_version("user_id", version1))
67
68 # make sure the key from version1 is gone, and the key from version2 is
69 # still there
70 keys = self.get_success(self.store.get_e2e_room_keys("user_id", version1))
71 self.assertEqual(len(keys["rooms"]), 0)
72
73 keys = self.get_success(self.store.get_e2e_room_keys("user_id", version2))
74 self.assertEqual(len(keys["rooms"]), 1)
3939 third = self.helper.send(self.room_id, body="test3")
4040 last = self.helper.send(self.room_id, body="test4")
4141
42 storage = self.hs.get_datastore()
42 store = self.hs.get_datastore()
43 storage = self.hs.get_storage()
4344
4445 # Get the topological token
45 event = storage.get_topological_token_for_event(last["event_id"])
46 event = store.get_topological_token_for_event(last["event_id"])
4647 self.pump()
4748 event = self.successResultOf(event)
4849
4950 # Purge everything before this topological token
50 purge = storage.purge_history(self.room_id, event, True)
51 purge = storage.purge_events.purge_history(self.room_id, event, True)
5152 self.pump()
5253 self.assertEqual(self.successResultOf(purge), None)
5354
5455 # Try and get the events
55 get_first = storage.get_event(first["event_id"])
56 get_second = storage.get_event(second["event_id"])
57 get_third = storage.get_event(third["event_id"])
58 get_last = storage.get_event(last["event_id"])
56 get_first = store.get_event(first["event_id"])
57 get_second = store.get_event(second["event_id"])
58 get_third = store.get_event(third["event_id"])
59 get_last = store.get_event(last["event_id"])
5960 self.pump()
6061
6162 # 1-3 should fail and last will succeed, meaning that 1-3 are deleted
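The hunks in these storage tests all follow the same migration: the monolithic datastore is split from a higher-level Storage object whose sub-components group related operations. A hedged summary sketch (assuming a HomeServer instance hs; function name illustrative):

from twisted.internet import defer

@defer.inlineCallbacks
def _example(hs, event, context, room_id, token, event_id):
    store = hs.get_datastore()  # low-level accessors, e.g. store.get_event()
    storage = hs.get_storage()  # grouped higher-level APIs

    yield storage.persistence.persist_event(event, context)         # was store.persist_event
    yield storage.purge_events.purge_history(room_id, token, True)  # was store.purge_history
    state = yield storage.state.get_state_for_event(event_id)       # was store.get_state_for_event
    return state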
3838
3939 def prepare(self, reactor, clock, hs):
4040 self.store = hs.get_datastore()
41 self.storage = hs.get_storage()
4142 self.event_builder_factory = hs.get_event_builder_factory()
4243 self.event_creation_handler = hs.get_event_creation_handler()
4344
7273 self.event_creation_handler.create_new_client_event(builder)
7374 )
7475
75 self.get_success(self.store.persist_event(event, context))
76 self.get_success(self.storage.persistence.persist_event(event, context))
7677
7778 return event
7879
9495 self.event_creation_handler.create_new_client_event(builder)
9596 )
9697
97 self.get_success(self.store.persist_event(event, context))
98 self.get_success(self.storage.persistence.persist_event(event, context))
9899
99100 return event
100101
115116 self.event_creation_handler.create_new_client_event(builder)
116117 )
117118
118 self.get_success(self.store.persist_event(event, context))
119 self.get_success(self.storage.persistence.persist_event(event, context))
119120
120121 return event
121122
262263 )
263264 )
264265
265 self.get_success(self.store.persist_event(event_1, context_1))
266 self.get_success(self.storage.persistence.persist_event(event_1, context_1))
266267
267268 event_2, context_2 = self.get_success(
268269 self.event_creation_handler.create_new_client_event(
281282 )
282283 )
283284 )
284 self.get_success(self.store.persist_event(event_2, context_2))
285 self.get_success(self.storage.persistence.persist_event(event_2, context_2))
285286
286287 # fetch one of the redactions
287288 fetched = self.get_success(self.store.get_event(redaction_event_id1))
6161 # Room events need the full datastore, for persist_event() and
6262 # get_room_state()
6363 self.store = hs.get_datastore()
64 self.storage = hs.get_storage()
6465 self.event_factory = hs.get_event_factory()
6566
6667 self.room = RoomID.from_string("!abcde:test")
7172
7273 @defer.inlineCallbacks
7374 def inject_room_event(self, **kwargs):
74 yield self.store.persist_event(
75 yield self.storage.persistence.persist_event(
7576 self.event_factory.create_event(room_id=self.room.to_string(), **kwargs)
7677 )
7778
4343 # We can't test the RoomMemberStore on its own without the other event
4444 # storage logic
4545 self.store = hs.get_datastore()
46 self.storage = hs.get_storage()
4647 self.event_builder_factory = hs.get_event_builder_factory()
4748 self.event_creation_handler = hs.get_event_creation_handler()
4849
6970 self.event_creation_handler.create_new_client_event(builder)
7071 )
7172
72 self.get_success(self.store.persist_event(event, context))
73 self.get_success(self.storage.persistence.persist_event(event, context))
7374
7475 return event
7576
3333 hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
3434
3535 self.store = hs.get_datastore()
36 self.storage = hs.get_storage()
37 self.state_datastore = self.store
3638 self.event_builder_factory = hs.get_event_builder_factory()
3739 self.event_creation_handler = hs.get_event_creation_handler()
3840
6264 builder
6365 )
6466
65 yield self.store.persist_event(event, context)
67 yield self.storage.persistence.persist_event(event, context)
6668
6769 return event
6870
8183 self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
8284 )
8385
84 state_group_map = yield self.store.get_state_groups_ids(
86 state_group_map = yield self.storage.state.get_state_groups_ids(
8587 self.room, [e2.event_id]
8688 )
8789 self.assertEqual(len(state_group_map), 1)
100102 self.room, self.u_alice, EventTypes.Name, "", {"name": "test room"}
101103 )
102104
103 state_group_map = yield self.store.get_state_groups(self.room, [e2.event_id])
105 state_group_map = yield self.storage.state.get_state_groups(
106 self.room, [e2.event_id]
107 )
104108 self.assertEqual(len(state_group_map), 1)
105109 state_list = list(state_group_map.values())[0]
106110
140144 )
141145
142146 # check we get the full state as of the final event
143 state = yield self.store.get_state_for_event(e5.event_id)
147 state = yield self.storage.state.get_state_for_event(e5.event_id)
144148
145149 self.assertIsNotNone(e4)
146150
156160 )
157161
158162 # check we can filter to the m.room.name event (with a '' state key)
159 state = yield self.store.get_state_for_event(
163 state = yield self.storage.state.get_state_for_event(
160164 e5.event_id, StateFilter.from_types([(EventTypes.Name, "")])
161165 )
162166
163167 self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
164168
165169 # check we can filter to the m.room.name event (with a wildcard None state key)
166 state = yield self.store.get_state_for_event(
170 state = yield self.storage.state.get_state_for_event(
167171 e5.event_id, StateFilter.from_types([(EventTypes.Name, None)])
168172 )
169173
170174 self.assertStateMapEqual({(e2.type, e2.state_key): e2}, state)
171175
172176 # check we can grab the m.room.member events (with a wildcard None state key)
173 state = yield self.store.get_state_for_event(
177 state = yield self.storage.state.get_state_for_event(
174178 e5.event_id, StateFilter.from_types([(EventTypes.Member, None)])
175179 )
176180
180184
181185 # check we can grab a specific room member without filtering out the
182186 # other event types
183 state = yield self.store.get_state_for_event(
187 state = yield self.storage.state.get_state_for_event(
184188 e5.event_id,
185189 state_filter=StateFilter(
186190 types={EventTypes.Member: {self.u_alice.to_string()}},
198202 )
199203
200204 # check that we can grab everything except members
201 state = yield self.store.get_state_for_event(
205 state = yield self.storage.state.get_state_for_event(
202206 e5.event_id,
203207 state_filter=StateFilter(
204208 types={EventTypes.Member: set()}, include_others=True
214218 #######################################################
215219
216220 room_id = self.room.to_string()
217 group_ids = yield self.store.get_state_groups_ids(room_id, [e5.event_id])
221 group_ids = yield self.storage.state.get_state_groups_ids(
222 room_id, [e5.event_id]
223 )
218224 group = list(group_ids.keys())[0]
219225
220226 # test _get_state_for_group_using_cache correctly filters out members
221227 # with types=[]
222 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
223 self.store._state_group_cache,
228 (
229 state_dict,
230 is_all,
231 ) = yield self.state_datastore._get_state_for_group_using_cache(
232 self.state_datastore._state_group_cache,
224233 group,
225234 state_filter=StateFilter(
226235 types={EventTypes.Member: set()}, include_others=True
236245 state_dict,
237246 )
238247
239 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
240 self.store._state_group_members_cache,
248 (
249 state_dict,
250 is_all,
251 ) = yield self.state_datastore._get_state_for_group_using_cache(
252 self.state_datastore._state_group_members_cache,
241253 group,
242254 state_filter=StateFilter(
243255 types={EventTypes.Member: set()}, include_others=True
249261
250262 # test _get_state_for_group_using_cache correctly filters in members
251263 # with wildcard types
252 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
253 self.store._state_group_cache,
264 (
265 state_dict,
266 is_all,
267 ) = yield self.state_datastore._get_state_for_group_using_cache(
268 self.state_datastore._state_group_cache,
254269 group,
255270 state_filter=StateFilter(
256271 types={EventTypes.Member: None}, include_others=True
266281 state_dict,
267282 )
268283
269 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
270 self.store._state_group_members_cache,
284 (
285 state_dict,
286 is_all,
287 ) = yield self.state_datastore._get_state_for_group_using_cache(
288 self.state_datastore._state_group_members_cache,
271289 group,
272290 state_filter=StateFilter(
273291 types={EventTypes.Member: None}, include_others=True
286304
287305 # test _get_state_for_group_using_cache correctly filters in members
288306 # with specific types
289 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
290 self.store._state_group_cache,
307 (
308 state_dict,
309 is_all,
310 ) = yield self.state_datastore._get_state_for_group_using_cache(
311 self.state_datastore._state_group_cache,
291312 group,
292313 state_filter=StateFilter(
293314 types={EventTypes.Member: {e5.state_key}}, include_others=True
303324 state_dict,
304325 )
305326
306 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
307 self.store._state_group_members_cache,
327 (
328 state_dict,
329 is_all,
330 ) = yield self.state_datastore._get_state_for_group_using_cache(
331 self.state_datastore._state_group_members_cache,
308332 group,
309333 state_filter=StateFilter(
310334 types={EventTypes.Member: {e5.state_key}}, include_others=True
316340
317341 # test _get_state_for_group_using_cache correctly filters in members
318342 # with specific types
319 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
320 self.store._state_group_members_cache,
343 (
344 state_dict,
345 is_all,
346 ) = yield self.state_datastore._get_state_for_group_using_cache(
347 self.state_datastore._state_group_members_cache,
321348 group,
322349 state_filter=StateFilter(
323350 types={EventTypes.Member: {e5.state_key}}, include_others=False
330357 #######################################################
331358 # deliberately remove e2 (room name) from the _state_group_cache
332359
333 (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get(
334 group
335 )
360 (
361 is_all,
362 known_absent,
363 state_dict_ids,
364 ) = self.state_datastore._state_group_cache.get(group)
336365
337366 self.assertEqual(is_all, True)
338367 self.assertEqual(known_absent, set())
345374 )
346375
347376 state_dict_ids.pop((e2.type, e2.state_key))
348 self.store._state_group_cache.invalidate(group)
349 self.store._state_group_cache.update(
350 sequence=self.store._state_group_cache.sequence,
377 self.state_datastore._state_group_cache.invalidate(group)
378 self.state_datastore._state_group_cache.update(
379 sequence=self.state_datastore._state_group_cache.sequence,
351380 key=group,
352381 value=state_dict_ids,
353382 # list fetched keys so it knows it's partial
354383 fetched_keys=((e1.type, e1.state_key),),
355384 )
356385
357 (is_all, known_absent, state_dict_ids) = self.store._state_group_cache.get(
358 group
359 )
386 (
387 is_all,
388 known_absent,
389 state_dict_ids,
390 ) = self.state_datastore._state_group_cache.get(group)
360391
361392 self.assertEqual(is_all, False)
362393 self.assertEqual(known_absent, set([(e1.type, e1.state_key)]))
368399 # test _get_state_for_group_using_cache correctly filters out members
369400 # with types=[]
370401 room_id = self.room.to_string()
371 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
372 self.store._state_group_cache,
402 (
403 state_dict,
404 is_all,
405 ) = yield self.state_datastore._get_state_for_group_using_cache(
406 self.state_datastore._state_group_cache,
373407 group,
374408 state_filter=StateFilter(
375409 types={EventTypes.Member: set()}, include_others=True
380414 self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
381415
382416 room_id = self.room.to_string()
383 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
384 self.store._state_group_members_cache,
417 (
418 state_dict,
419 is_all,
420 ) = yield self.state_datastore._get_state_for_group_using_cache(
421 self.state_datastore._state_group_members_cache,
385422 group,
386423 state_filter=StateFilter(
387424 types={EventTypes.Member: set()}, include_others=True
393430
394431 # test _get_state_for_group_using_cache correctly filters in members
395432 # with wildcard types
396 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
397 self.store._state_group_cache,
433 (
434 state_dict,
435 is_all,
436 ) = yield self.state_datastore._get_state_for_group_using_cache(
437 self.state_datastore._state_group_cache,
398438 group,
399439 state_filter=StateFilter(
400440 types={EventTypes.Member: None}, include_others=True
404444 self.assertEqual(is_all, False)
405445 self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
406446
407 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
408 self.store._state_group_members_cache,
447 (
448 state_dict,
449 is_all,
450 ) = yield self.state_datastore._get_state_for_group_using_cache(
451 self.state_datastore._state_group_members_cache,
409452 group,
410453 state_filter=StateFilter(
411454 types={EventTypes.Member: None}, include_others=True
423466
424467 # test _get_state_for_group_using_cache correctly filters in members
425468 # with specific types
426 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
427 self.store._state_group_cache,
469 (
470 state_dict,
471 is_all,
472 ) = yield self.state_datastore._get_state_for_group_using_cache(
473 self.state_datastore._state_group_cache,
428474 group,
429475 state_filter=StateFilter(
430476 types={EventTypes.Member: {e5.state_key}}, include_others=True
434480 self.assertEqual(is_all, False)
435481 self.assertDictEqual({(e1.type, e1.state_key): e1.event_id}, state_dict)
436482
437 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
438 self.store._state_group_members_cache,
483 (
484 state_dict,
485 is_all,
486 ) = yield self.state_datastore._get_state_for_group_using_cache(
487 self.state_datastore._state_group_members_cache,
439488 group,
440489 state_filter=StateFilter(
441490 types={EventTypes.Member: {e5.state_key}}, include_others=True
447496
448497 # test _get_state_for_group_using_cache correctly filters in members
449498 # with specific types
450 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
451 self.store._state_group_cache,
499 (
500 state_dict,
501 is_all,
502 ) = yield self.state_datastore._get_state_for_group_using_cache(
503 self.state_datastore._state_group_cache,
452504 group,
453505 state_filter=StateFilter(
454506 types={EventTypes.Member: {e5.state_key}}, include_others=False
458510 self.assertEqual(is_all, False)
459511 self.assertDictEqual({}, state_dict)
460512
461 (state_dict, is_all) = yield self.store._get_state_for_group_using_cache(
462 self.store._state_group_members_cache,
513 (
514 state_dict,
515 is_all,
516 ) = yield self.state_datastore._get_state_for_group_using_cache(
517 self.state_datastore._state_group_members_cache,
463518 group,
464519 state_filter=StateFilter(
465520 types={EventTypes.Member: {e5.state_key}}, include_others=False
3535 # Figure out what the most recent event is
3636 most_recent = self.successResultOf(
3737 maybeDeferred(
38 self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
38 self.homeserver.get_datastore().get_latest_event_ids_in_room,
39 self.room_id,
3940 )
4041 )[0]
4142
5758 )
5859
5960 self.handler = self.homeserver.get_handlers().federation_handler
60 self.handler.do_auth = lambda *a, **b: succeed(True)
61 self.handler.do_auth = lambda origin, event, context, auth_events: succeed(
62 context
63 )
6164 self.client = self.homeserver.get_federation_client()
6265 self.client._check_sigs_and_hash_and_fetch = lambda dest, pdus, **k: succeed(
6366 pdus
7477 self.assertEqual(
7578 self.successResultOf(
7679 maybeDeferred(
77 self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
80 self.homeserver.get_datastore().get_latest_event_ids_in_room,
81 self.room_id,
7882 )
7983 )[0],
8084 "$join:test.serv",
96100 # Figure out what the most recent event is
97101 most_recent = self.successResultOf(
98102 maybeDeferred(
99 self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
103 self.homeserver.get_datastore().get_latest_event_ids_in_room,
104 self.room_id,
100105 )
101106 )[0]
102107
136141
137142 # Make sure the invalid event isn't there
138143 extrem = maybeDeferred(
139 self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id
144 self.homeserver.get_datastore().get_latest_event_ids_in_room, self.room_id
140145 )
141146 self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
0 # -*- coding: utf-8 -*-
1 # Copyright 2019 Matrix.org Foundation C.I.C.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import resource
16
17 import mock
18
19 from synapse.app.homeserver import phone_stats_home
20
21 from tests.unittest import HomeserverTestCase
22
23
24 class PhoneHomeStatsTestCase(HomeserverTestCase):
25 def test_performance_frozen_clock(self):
26 """
27 If time doesn't move, don't error out.
28 """
29 past_stats = [
30 (self.hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF))
31 ]
32 stats = {}
33 self.get_success(phone_stats_home(self.hs, stats, past_stats))
34 self.assertEqual(stats["cpu_average"], 0)
35
36 def test_performance_100(self):
37 """
38 1 second of usage over 1 second is 100% CPU usage.
39 """
40 real_res = resource.getrusage(resource.RUSAGE_SELF)
41 old_resource = mock.Mock(spec=real_res)
42 old_resource.ru_utime = real_res.ru_utime - 1
43 old_resource.ru_stime = real_res.ru_stime
44 old_resource.ru_maxrss = real_res.ru_maxrss
45
46 past_stats = [(self.hs.get_clock().time(), old_resource)]
47 stats = {}
48 self.reactor.advance(1)
49 self.get_success(phone_stats_home(self.hs, stats, past_stats))
50 self.assertApproximates(stats["cpu_average"], 100, tolerance=2.5)
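The expected values follow from simple arithmetic, assuming (as these tests imply) that phone_stats_home reports average CPU as the change in used CPU time over the change in wall-clock time:

# Hedged worked example of the 100% case above (numbers illustrative).
old_cpu = 10.0 + 2.0  # ru_utime + ru_stime at the previous sample
new_cpu = 11.0 + 2.0  # one extra second of user time since then
elapsed = 1.0         # the reactor advanced by one second

cpu_average = 100 * (new_cpu - old_cpu) / elapsed
print(cpu_average)    # 100.0; the frozen-clock test expects a guard for elapsed == 0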
2020 from synapse.api.constants import EventTypes, Membership
2121 from synapse.api.room_versions import RoomVersions
2222 from synapse.events import FrozenEvent
23 from synapse.events.snapshot import EventContext
2324 from synapse.state import StateHandler, StateResolutionHandler
2425
2526 from tests import unittest
157158 class StateTestCase(unittest.TestCase):
158159 def setUp(self):
159160 self.store = StateGroupStore()
161 storage = Mock(main=self.store, state=self.store)
160162 hs = Mock(
161163 spec_set=[
162164 "config",
163165 "get_datastore",
166 "get_storage",
164167 "get_auth",
165168 "get_state_handler",
166169 "get_clock",
173176 hs.get_clock.return_value = MockClock()
174177 hs.get_auth.return_value = Auth(hs)
175178 hs.get_state_resolution_handler = lambda: StateResolutionHandler(hs)
179 hs.get_storage.return_value = storage
176180
177181 self.state = StateHandler(hs)
178182 self.event_id = 0
194198
195199 self.store.register_events(graph.walk())
196200
197 context_store = {}
201 context_store = {} # type: dict[str, EventContext]
198202
199203 for event in graph.walk():
200204 context = yield self.state.compute_event_context(event)
201205 self.store.register_event_context(event, context)
202206 context_store[event.event_id] = context
203207
204 prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
208 ctx_c = context_store["C"]
209 ctx_d = context_store["D"]
210
211 prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
205212 self.assertEqual(2, len(prev_state_ids))
213
214 self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
215 self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
206216
207217 @defer.inlineCallbacks
208218 def test_branch_basic_conflict(self):
237247 self.store.register_event_context(event, context)
238248 context_store[event.event_id] = context
239249
240 prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
241
250 # C ends up winning the resolution between B and C
251
252 ctx_c = context_store["C"]
253 ctx_d = context_store["D"]
254
255 prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
242256 self.assertSetEqual(
243257 {"START", "A", "C"}, {e_id for e_id in prev_state_ids.values()}
244258 )
259
260 self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event)
261 self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
245262
246263 @defer.inlineCallbacks
247264 def test_branch_have_banned_conflict(self):
288305 self.store.register_event_context(event, context)
289306 context_store[event.event_id] = context
290307
291 prev_state_ids = yield context_store["E"].get_prev_state_ids(self.store)
292
308 # C ends up winning the resolution between C and D because bans win over other
309 # changes
310
311 ctx_c = context_store["C"]
312 ctx_e = context_store["E"]
313
314 prev_state_ids = yield ctx_e.get_prev_state_ids(self.store)
293315 self.assertSetEqual(
294316 {"START", "A", "B", "C"}, {e for e in prev_state_ids.values()}
295317 )
318 self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event)
319 self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group)
296320
297321 @defer.inlineCallbacks
298322 def test_branch_have_perms_conflict(self):
356380 self.store.register_event_context(event, context)
357381 context_store[event.event_id] = context
358382
359 prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store)
360
383 # B ends up winning the resolution between B and C because power levels
384 # win over other changes.
385
386 ctx_b = context_store["B"]
387 ctx_d = context_store["D"]
388
389 prev_state_ids = yield ctx_d.get_prev_state_ids(self.store)
361390 self.assertSetEqual(
362391 {"A1", "A2", "A3", "A5", "B"}, {e for e in prev_state_ids.values()}
363392 )
393
394 self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event)
395 self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group)
364396
365397 def _add_depths(self, nodes, edges):
366398 def _get_depth(ev):
386418
387419 context = yield self.state.compute_event_context(event, old_state=old_state)
388420
421 prev_state_ids = yield context.get_prev_state_ids(self.store)
422 self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
423
389424 current_state_ids = yield context.get_current_state_ids(self.store)
390
391 self.assertEqual(
392 set(e.event_id for e in old_state), set(current_state_ids.values())
393 )
394
395 self.assertIsNotNone(context.state_group)
425 self.assertCountEqual(
426 (e.event_id for e in old_state), current_state_ids.values()
427 )
428
429 self.assertIsNotNone(context.state_group_before_event)
430 self.assertEqual(context.state_group_before_event, context.state_group)
396431
397432 @defer.inlineCallbacks
398433 def test_annotate_with_old_state(self):
407442 context = yield self.state.compute_event_context(event, old_state=old_state)
408443
409444 prev_state_ids = yield context.get_prev_state_ids(self.store)
410
411 self.assertEqual(
412 set(e.event_id for e in old_state), set(prev_state_ids.values())
413 )
445 self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values())
446
447 current_state_ids = yield context.get_current_state_ids(self.store)
448 self.assertCountEqual(
449 (e.event_id for e in old_state + [event]), current_state_ids.values()
450 )
451
452 self.assertIsNotNone(context.state_group_before_event)
453 self.assertNotEqual(context.state_group_before_event, context.state_group)
454 self.assertEqual(context.state_group_before_event, context.prev_group)
455 self.assertEqual({("state", ""): event.event_id}, context.delta_ids)
414456
415457 @defer.inlineCallbacks
416458 def test_trivial_annotate_message(self):
1313 # limitations under the License.
1414 import logging
1515
16 from mock import Mock
17
1618 from twisted.internet import defer
1719 from twisted.internet.defer import succeed
1820
3537 self.event_creation_handler = self.hs.get_event_creation_handler()
3638 self.event_builder_factory = self.hs.get_event_builder_factory()
3739 self.store = self.hs.get_datastore()
40 self.storage = self.hs.get_storage()
3841
3942 yield create_room(self.hs, TEST_ROOM_ID, "@someone:ROOM")
4043
6164 events_to_filter.append(evt)
6265
6366 filtered = yield filter_events_for_server(
64 self.store, "test_server", events_to_filter
67 self.storage, "test_server", events_to_filter
6568 )
6669
6770 # the result should be 5 redacted events, and 5 unredacted events.
99102
100103 # ... and the filtering happens.
101104 filtered = yield filter_events_for_server(
102 self.store, "test_server", events_to_filter
105 self.storage, "test_server", events_to_filter
103106 )
104107
105108 for i in range(0, len(events_to_filter)):
136139 event, context = yield self.event_creation_handler.create_new_client_event(
137140 builder
138141 )
139 yield self.hs.get_datastore().persist_event(event, context)
142 yield self.storage.persistence.persist_event(event, context)
140143 return event
141144
142145 @defer.inlineCallbacks
158161 builder
159162 )
160163
161 yield self.hs.get_datastore().persist_event(event, context)
164 yield self.storage.persistence.persist_event(event, context)
162165 return event
163166
164167 @defer.inlineCallbacks
179182 builder
180183 )
181184
182 yield self.hs.get_datastore().persist_event(event, context)
185 yield self.storage.persistence.persist_event(event, context)
183186 return event
184187
185188 @defer.inlineCallbacks
256259
257260 logger.info("Starting filtering")
258261 start = time.time()
262
263 storage = Mock()
264 storage.main = test_store
265 storage.state = test_store
266
259267 filtered = yield filter_events_for_server(
260 test_store, "test_server", events_to_filter
268 storage, "test_server", events_to_filter
261269 )
309309
310310 obj.mock.return_value = ["spam", "eggs"]
311311 r = obj.fn(1, 2)
312 self.assertEqual(r, ["spam", "eggs"])
312 self.assertEqual(r.result, ["spam", "eggs"])
313313 obj.mock.assert_called_once_with(1, 2)
314314 obj.mock.reset_mock()
315315
316316 # a call with different params should call the mock again
317317 obj.mock.return_value = ["chips"]
318318 r = obj.fn(1, 3)
319 self.assertEqual(r, ["chips"])
319 self.assertEqual(r.result, ["chips"])
320320 obj.mock.assert_called_once_with(1, 3)
321321 obj.mock.reset_mock()
322322
324324 self.assertEqual(len(obj.fn.cache.cache), 3)
325325
326326 r = obj.fn(1, 2)
327 self.assertEqual(r, ["spam", "eggs"])
327 self.assertEqual(r.result, ["spam", "eggs"])
328328 r = obj.fn(1, 3)
329 self.assertEqual(r, ["chips"])
329 self.assertEqual(r.result, ["chips"])
330330 obj.mock.assert_not_called()
331331
332332 def test_cache_iterable_with_sync_exception(self):
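These assertions changed because @cached-decorated methods now return a Deferred even on a cache hit, so the tests read the value off the already-fired Deferred. A hedged illustration with a plain Deferred:

from twisted.internet import defer

# A fired Deferred exposes its value via .result, which is what the updated
# assertions rely on.
d = defer.succeed(["spam", "eggs"])
assert d.result == ["spam", "eggs"]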
324324 if homeserverToUse.__name__ == "TestHomeServer":
325325 hs.setup_master()
326326 else:
327 # If we have been given an explicit datastore we probably want to mock
328 # out the DataStores somehow too. This all feels a bit wrong, but then
329 # mocking the stores feels wrong too.
330 datastores = Mock(datastore=datastore)
331
327332 hs = homeserverToUse(
328333 name,
329334 db_pool=None,
330335 datastore=datastore,
336 datastores=datastores,
331337 config=config,
332338 version_string="Synapse/tests",
333339 database_engine=db_engine,
645651 creator_id (str)
646652 """
647653
648 store = hs.get_datastore()
654 persistence_store = hs.get_storage().persistence
649655 event_builder_factory = hs.get_event_builder_factory()
650656 event_creation_handler = hs.get_event_creation_handler()
651657
662668
663669 event, context = yield event_creation_handler.create_new_client_event(builder)
664670
665 yield store.persist_event(event, context)
671 yield persistence_store.persist_event(event, context)
00 [tox]
1 envlist = packaging, py35, py36, py37, check_codestyle, check_isort
1 envlist = packaging, py35, py36, py37, py38, check_codestyle, check_isort
22
33 [base]
44 basepython = python3.7
113113 basepython = python3.6
114114 deps =
115115 flake8
116 black==19.3b0 # We pin so that our tests don't start failing on new releases of black.
116 black==19.10b0 # We pin so that our tests don't start failing on new releases of black.
117117 commands =
118118 python -m black --check --diff .
119 /bin/sh -c "flake8 synapse tests scripts scripts-dev scripts/hash_password scripts/register_new_matrix_user scripts/synapse_port_db synctl {env:PEP8SUFFIX:}"
119 /bin/sh -c "flake8 synapse tests scripts scripts-dev synctl {env:PEP8SUFFIX:}"
120120 {toxinidir}/scripts-dev/config-lint.sh
121121
122122 [testenv:check_isort]
123123 skip_install = True
124124 deps = isort
125 commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests"
125 commands = /bin/sh -c "isort -c -df -sp setup.cfg -rc synapse tests scripts-dev scripts"
126126
127127 [testenv:check-newsfragment]
128128 skip_install = True
166166 env =
167167 MYPYPATH = stubs/
168168 extras = all
169 commands = mypy --show-traceback --check-untyped-defs --show-error-codes --follow-imports=normal \
169 commands = mypy \
170170 synapse/logging/ \
171171 synapse/config/